/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/coproc.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Authors: Rusty Russell <rusty@rustcorp.com.au>
 *          Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/kvm_host.h>
#include <linux/mm.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_host.h>
#include <asm/kvm_mmu.h>
#include <asm/perf_event.h>

#include <trace/events/kvm.h>

#include "sys_regs.h"

#include "trace.h"

/*
 * All of this file is extremely similar to the ARM coproc.c, but the
 * types are different. My gut feeling is that it should be pretty
 * easy to merge, but that would be an ABI breakage -- again. VFP
 * would also need to be abstracted.
 *
 * For AArch32, we only take care of what is being trapped. Anything
 * that has to do with init and userspace access has to go via the
 * 64bit interface.
 */

/* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
static u32 cache_levels;

/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
#define CSSELR_MAX 12

/* Which cache CCSIDR represents depends on CSSELR value. */
static u32 get_ccsidr(u32 csselr)
{
	u32 ccsidr;

	/* Make sure no one else changes CSSELR during this! */
	local_irq_disable();
	/* Put value into CSSELR */
	asm volatile("msr csselr_el1, %x0" : : "r" (csselr));
	isb();
	/* Read result out of CCSIDR */
	asm volatile("mrs %0, ccsidr_el1" : "=r" (ccsidr));
	local_irq_enable();

	return ccsidr;
}

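/*
 * Worked example (illustrative): CSSELR is an index, not data. Bit 0
 * selects instruction (1) vs data/unified (0) cache, and bits [3:1]
 * select the level minus one. So csselr == 0 reads the CCSIDR of the
 * level 1 data/unified cache, while csselr == 3 (0b011) reads that of
 * the level 2 instruction cache.
 */
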
/*
 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
 */
static bool access_dcsw(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (!p->is_write)
		return read_from_write_only(vcpu, p);

	kvm_set_way_flush(vcpu);
	return true;
}

/*
 * Generic accessor for VM registers. Only called as long as HCR_TVM
 * is set. If the guest enables the MMU, we stop trapping the VM
 * sys_regs and leave it in complete control of the caches.
 */
static bool access_vm_reg(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	bool was_enabled = vcpu_has_cache_enabled(vcpu);

	BUG_ON(!p->is_write);

	if (!p->is_aarch32) {
		vcpu_sys_reg(vcpu, r->reg) = p->regval;
	} else {
		if (!p->is_32bit)
			vcpu_cp15_64_high(vcpu, r->reg) = upper_32_bits(p->regval);
		vcpu_cp15_64_low(vcpu, r->reg) = lower_32_bits(p->regval);
	}

	kvm_toggle_cache(vcpu, was_enabled);
	return true;
}

/*
 * Trap handler for the GICv3 SGI generation system register.
 * Forward the request to the VGIC emulation.
 * The cp15_64 code makes sure this automatically works
 * for both AArch64 and AArch32 accesses.
 */
static bool access_gic_sgi(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (!p->is_write)
		return read_from_write_only(vcpu, p);

	vgic_v3_dispatch_sgi(vcpu, p->regval);

	return true;
}

static bool trap_raz_wi(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);
	else
		return read_zero(vcpu, p);
}

static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		p->regval = (1 << 3);
		return true;
	}
}

static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
				   struct sys_reg_params *p,
				   const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		u32 val;
		asm volatile("mrs %0, dbgauthstatus_el1" : "=r" (val));
		p->regval = val;
		return true;
	}
}

/*
 * We want to avoid world-switching all the DBG registers all the
 * time:
 *
 * - If we've touched any debug register, it is likely that we're
 *   going to touch more of them. It then makes sense to disable the
 *   traps and start doing the save/restore dance
 * - If debug is active (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), it is
 *   then mandatory to save/restore the registers, as the guest
 *   depends on them.
 *
 * For this, we use a DIRTY bit, indicating the guest has modified the
 * debug registers, used as follows:
 *
 * On guest entry:
 * - If the dirty bit is set (because we're coming back from trapping),
 *   disable the traps, save host registers, restore guest registers.
 * - If debug is actively in use (DBG_MDSCR_KDE or DBG_MDSCR_MDE set),
 *   set the dirty bit, disable the traps, save host registers,
 *   restore guest registers.
 * - Otherwise, enable the traps
 *
 * On guest exit:
 * - If the dirty bit is set, save guest registers, restore host
 *   registers and clear the dirty bit. This ensures that the host can
 *   now use the debug registers.
 */
static bool trap_debug_regs(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *p,
			    const struct sys_reg_desc *r)
{
	if (p->is_write) {
		vcpu_sys_reg(vcpu, r->reg) = p->regval;
		vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
	} else {
		p->regval = vcpu_sys_reg(vcpu, r->reg);
	}

	trace_trap_reg(__func__, r->reg, p->is_write, p->regval);

	return true;
}

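/*
 * Illustrative sketch only (not part of this file): the guest-entry
 * policy described above could be written roughly as below. The helper
 * name debug_dirty() is hypothetical; in kernels of this vintage the
 * real entry/exit logic lives in arch/arm64/kvm/debug.c and hyp code.
 *
 *	if (debug_dirty(vcpu) ||
 *	    (vcpu_sys_reg(vcpu, MDSCR_EL1) & (DBG_MDSCR_KDE | DBG_MDSCR_MDE))) {
 *		vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
 *		... disable traps, save host regs, restore guest regs ...
 *	} else {
 *		... leave the traps enabled ...
 *	}
 */
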
/*
 * reg_to_dbg/dbg_to_reg
 *
 * A 32 bit write to a debug register leaves the top bits alone
 * A 32 bit read from a debug register only returns the bottom bits
 *
 * All writes will set the KVM_ARM64_DEBUG_DIRTY flag to ensure the
 * hyp.S code switches between host and guest values in future.
 */
static void reg_to_dbg(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       u64 *dbg_reg)
{
	u64 val = p->regval;

	if (p->is_32bit) {
		val &= 0xffffffffUL;
		val |= ((*dbg_reg >> 32) << 32);
	}

	*dbg_reg = val;
	vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
}

static void dbg_to_reg(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       u64 *dbg_reg)
{
	p->regval = *dbg_reg;
	if (p->is_32bit)
		p->regval &= 0xffffffffUL;
}

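/*
 * Worked example (illustrative): with *dbg_reg == 0xdeadbeef00c0ffee,
 * a 32-bit (AArch32) read through dbg_to_reg() returns 0x00c0ffee, and
 * a 32-bit write of 0x12345678 through reg_to_dbg() leaves
 * 0xdeadbeef12345678: the top half of the 64-bit register survives.
 */
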
static bool trap_bvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];

	if (p->is_write)
		reg_to_dbg(vcpu, p, dbg_reg);
	else
		dbg_to_reg(vcpu, p, dbg_reg);

	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);

	return true;
}

static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static void reset_bvr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg] = rd->val;
}

static bool trap_bcr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];

	if (p->is_write)
		reg_to_dbg(vcpu, p, dbg_reg);
	else
		dbg_to_reg(vcpu, p, dbg_reg);

	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);

	return true;
}

static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;

	return 0;
}

static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static void reset_bcr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg] = rd->val;
}

static bool trap_wvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];

	if (p->is_write)
		reg_to_dbg(vcpu, p, dbg_reg);
	else
		dbg_to_reg(vcpu, p, dbg_reg);

	trace_trap_reg(__func__, rd->reg, p->is_write,
		vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]);

	return true;
}

static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static void reset_wvr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg] = rd->val;
}

static bool trap_wcr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];

	if (p->is_write)
		reg_to_dbg(vcpu, p, dbg_reg);
	else
		dbg_to_reg(vcpu, p, dbg_reg);

	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);

	return true;
}

static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static void reset_wcr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg] = rd->val;
}

static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 amair;

	asm volatile("mrs %0, amair_el1\n" : "=r" (amair));
	vcpu_sys_reg(vcpu, AMAIR_EL1) = amair;
}

static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 mpidr;

	/*
	 * Map the vcpu_id into the first three affinity level fields of
	 * the MPIDR. We limit the number of VCPUs in level 0 due to a
	 * limitation to 16 CPUs in that level in the ICC_SGIxR registers
	 * of the GICv3 to be able to address each CPU directly when
	 * sending IPIs.
	 */
	mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0);
	mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1);
	mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2);
	vcpu_sys_reg(vcpu, MPIDR_EL1) = (1ULL << 31) | mpidr;
}

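/*
 * Worked example (illustrative): assuming MPIDR_LEVEL_SHIFT(0..2)
 * evaluate to 0, 8 and 16, vcpu_id 20 (0x14) maps to Aff0 = 4 and
 * Aff1 = 1, so the guest reads back
 * MPIDR_EL1 == (1ULL << 31) | (1 << 8) | 4 == 0x80000104.
 */
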
static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 pmcr, val;

	asm volatile("mrs %0, pmcr_el0\n" : "=r" (pmcr));
	/* Writable bits of PMCR_EL0 (ARMV8_PMU_PMCR_MASK) are reset to UNKNOWN,
	 * except PMCR.E, which resets to zero.
	 */
	val = ((pmcr & ~ARMV8_PMU_PMCR_MASK)
	       | (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E);
	vcpu_sys_reg(vcpu, PMCR_EL0) = val;
}

static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	u64 val;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (p->is_write) {
		/* Only update writeable bits of PMCR */
		val = vcpu_sys_reg(vcpu, PMCR_EL0);
		val &= ~ARMV8_PMU_PMCR_MASK;
		val |= p->regval & ARMV8_PMU_PMCR_MASK;
		vcpu_sys_reg(vcpu, PMCR_EL0) = val;
	} else {
		/* PMCR.P & PMCR.C are RAZ */
		val = vcpu_sys_reg(vcpu, PMCR_EL0)
		      & ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C);
		p->regval = val;
	}

	return true;
}

static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (p->is_write)
		vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval;
	else
		/* return PMSELR.SEL field */
		p->regval = vcpu_sys_reg(vcpu, PMSELR_EL0)
			    & ARMV8_PMU_COUNTER_MASK;

	return true;
}

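/*
 * Worked example (illustrative): a guest write of 0xffffffe3 to
 * PMSELR_EL0 is stored in full, but a subsequent read only returns the
 * SEL field, i.e. 0x3 (assuming ARMV8_PMU_COUNTER_MASK == 0x1f).
 */
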
/* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
#define DBG_BCR_BVR_WCR_WVR_EL1(n)					\
	/* DBGBVRn_EL1 */						\
	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b100),	\
	  trap_bvr, reset_bvr, n, 0, get_bvr, set_bvr },		\
	/* DBGBCRn_EL1 */						\
	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b101),	\
	  trap_bcr, reset_bcr, n, 0, get_bcr, set_bcr },		\
	/* DBGWVRn_EL1 */						\
	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b110),	\
	  trap_wvr, reset_wvr, n, 0, get_wvr, set_wvr },		\
	/* DBGWCRn_EL1 */						\
	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b111),	\
	  trap_wcr, reset_wcr, n, 0, get_wcr, set_wcr }

/*
 * Architected system registers.
 * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
 *
 * We could trap ID_DFR0 and tell the guest we don't support performance
 * monitoring. Unfortunately the patch to make the kernel check ID_DFR0 was
 * NAKed, so it will read the PMCR anyway.
 *
 * Therefore we tell the guest we have 0 counters. Unfortunately, we
 * must always support PMCCNTR (the cycle counter): we just RAZ/WI for
 * all PM registers, which doesn't crash the guest kernel at least.
 *
 * Debug handling: We do trap most, if not all, debug related system
 * registers. The implementation is good enough to ensure that a guest
 * can use these with minimal performance degradation. The drawback is
 * that we don't implement any of the external debug, nor the OSlock
 * protocol. This should be revisited if we ever encounter a more
 * demanding guest...
 */
static const struct sys_reg_desc sys_reg_descs[] = {
	/* DC ISW */
	{ Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b0110), Op2(0b010),
	  access_dcsw },
	/* DC CSW */
	{ Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1010), Op2(0b010),
	  access_dcsw },
	/* DC CISW */
	{ Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b010),
	  access_dcsw },

	DBG_BCR_BVR_WCR_WVR_EL1(0),
	DBG_BCR_BVR_WCR_WVR_EL1(1),
	/* MDCCINT_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b000),
	  trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
	/* MDSCR_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b010),
	  trap_debug_regs, reset_val, MDSCR_EL1, 0 },
	DBG_BCR_BVR_WCR_WVR_EL1(2),
	DBG_BCR_BVR_WCR_WVR_EL1(3),
	DBG_BCR_BVR_WCR_WVR_EL1(4),
	DBG_BCR_BVR_WCR_WVR_EL1(5),
	DBG_BCR_BVR_WCR_WVR_EL1(6),
	DBG_BCR_BVR_WCR_WVR_EL1(7),
	DBG_BCR_BVR_WCR_WVR_EL1(8),
	DBG_BCR_BVR_WCR_WVR_EL1(9),
	DBG_BCR_BVR_WCR_WVR_EL1(10),
	DBG_BCR_BVR_WCR_WVR_EL1(11),
	DBG_BCR_BVR_WCR_WVR_EL1(12),
	DBG_BCR_BVR_WCR_WVR_EL1(13),
	DBG_BCR_BVR_WCR_WVR_EL1(14),
	DBG_BCR_BVR_WCR_WVR_EL1(15),

	/* MDRAR_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000),
	  trap_raz_wi },
	/* OSLAR_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b100),
	  trap_raz_wi },
	/* OSLSR_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0001), Op2(0b100),
	  trap_oslsr_el1 },
	/* OSDLR_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0011), Op2(0b100),
	  trap_raz_wi },
	/* DBGPRCR_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0100), Op2(0b100),
	  trap_raz_wi },
	/* DBGCLAIMSET_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1000), Op2(0b110),
	  trap_raz_wi },
	/* DBGCLAIMCLR_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1001), Op2(0b110),
	  trap_raz_wi },
	/* DBGAUTHSTATUS_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b110),
	  trap_dbgauthstatus_el1 },

	/* MDCCSR_EL1 */
	{ Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0001), Op2(0b000),
	  trap_raz_wi },
	/* DBGDTR_EL0 */
	{ Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0100), Op2(0b000),
	  trap_raz_wi },
	/* DBGDTR[TR]X_EL0 */
	{ Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0101), Op2(0b000),
	  trap_raz_wi },

	/* DBGVCR32_EL2 */
	{ Op0(0b10), Op1(0b100), CRn(0b0000), CRm(0b0111), Op2(0b000),
	  NULL, reset_val, DBGVCR32_EL2, 0 },

	/* MPIDR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b101),
	  NULL, reset_mpidr, MPIDR_EL1 },
	/* SCTLR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000),
	  access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
	/* CPACR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b010),
	  NULL, reset_val, CPACR_EL1, 0 },
	/* TTBR0_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b000),
	  access_vm_reg, reset_unknown, TTBR0_EL1 },
	/* TTBR1_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b001),
	  access_vm_reg, reset_unknown, TTBR1_EL1 },
	/* TCR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b010),
	  access_vm_reg, reset_val, TCR_EL1, 0 },

	/* AFSR0_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b000),
	  access_vm_reg, reset_unknown, AFSR0_EL1 },
	/* AFSR1_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b001),
	  access_vm_reg, reset_unknown, AFSR1_EL1 },
	/* ESR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0010), Op2(0b000),
	  access_vm_reg, reset_unknown, ESR_EL1 },
	/* FAR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0110), CRm(0b0000), Op2(0b000),
	  access_vm_reg, reset_unknown, FAR_EL1 },
	/* PAR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0111), CRm(0b0100), Op2(0b000),
	  NULL, reset_unknown, PAR_EL1 },

	/* PMINTENSET_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b001),
	  trap_raz_wi },
	/* PMINTENCLR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b010),
	  trap_raz_wi },

	/* MAIR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0010), Op2(0b000),
	  access_vm_reg, reset_unknown, MAIR_EL1 },
	/* AMAIR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0011), Op2(0b000),
	  access_vm_reg, reset_amair_el1, AMAIR_EL1 },

	/* VBAR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b0000), Op2(0b000),
	  NULL, reset_val, VBAR_EL1, 0 },

	/* ICC_SGI1R_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1011), Op2(0b101),
	  access_gic_sgi },
	/* ICC_SRE_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1100), Op2(0b101),
	  trap_raz_wi },

	/* CONTEXTIDR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b001),
	  access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
	/* TPIDR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b100),
	  NULL, reset_unknown, TPIDR_EL1 },

	/* CNTKCTL_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1110), CRm(0b0001), Op2(0b000),
	  NULL, reset_val, CNTKCTL_EL1, 0},

	/* CSSELR_EL1 */
	{ Op0(0b11), Op1(0b010), CRn(0b0000), CRm(0b0000), Op2(0b000),
	  NULL, reset_unknown, CSSELR_EL1 },

	/* PMCR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b000),
	  access_pmcr, reset_pmcr, },
	/* PMCNTENSET_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b001),
	  trap_raz_wi },
	/* PMCNTENCLR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b010),
	  trap_raz_wi },
	/* PMOVSCLR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b011),
	  trap_raz_wi },
	/* PMSWINC_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b100),
	  trap_raz_wi },
	/* PMSELR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b101),
	  access_pmselr, reset_unknown, PMSELR_EL0 },
	/* PMCEID0_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b110),
	  trap_raz_wi },
	/* PMCEID1_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b111),
	  trap_raz_wi },
	/* PMCCNTR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b000),
	  trap_raz_wi },
	/* PMXEVTYPER_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b001),
	  trap_raz_wi },
	/* PMXEVCNTR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b010),
	  trap_raz_wi },
	/* PMUSERENR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b000),
	  trap_raz_wi },
	/* PMOVSSET_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b011),
	  trap_raz_wi },

	/* TPIDR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b010),
	  NULL, reset_unknown, TPIDR_EL0 },
	/* TPIDRRO_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b011),
	  NULL, reset_unknown, TPIDRRO_EL0 },

	/* DACR32_EL2 */
	{ Op0(0b11), Op1(0b100), CRn(0b0011), CRm(0b0000), Op2(0b000),
	  NULL, reset_unknown, DACR32_EL2 },
	/* IFSR32_EL2 */
	{ Op0(0b11), Op1(0b100), CRn(0b0101), CRm(0b0000), Op2(0b001),
	  NULL, reset_unknown, IFSR32_EL2 },
	/* FPEXC32_EL2 */
	{ Op0(0b11), Op1(0b100), CRn(0b0101), CRm(0b0011), Op2(0b000),
	  NULL, reset_val, FPEXC32_EL2, 0x70 },
};

static bool trap_dbgidr(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		u64 dfr = read_system_reg(SYS_ID_AA64DFR0_EL1);
		u64 pfr = read_system_reg(SYS_ID_AA64PFR0_EL1);
		u32 el3 = !!cpuid_feature_extract_field(pfr, ID_AA64PFR0_EL3_SHIFT);

		p->regval = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) |
			     (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) |
			     (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20)
			     | (6 << 16) | (el3 << 14) | (el3 << 12));
		return true;
	}
}

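/*
 * The value synthesized above follows the AArch32 DBGIDR layout:
 * WRPs in [31:28], BRPs in [27:24], CTX_CMPs in [23:20], a debug
 * architecture version of 6 (0b0110, ARMv8) in [19:16], and two
 * EL3-dependent bits at positions 14 and 12 (nSUHD_imp and SE_imp,
 * per our reading of the AArch32 spec).
 */
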
static bool trap_debug32(struct kvm_vcpu *vcpu,
			 struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	if (p->is_write) {
		vcpu_cp14(vcpu, r->reg) = p->regval;
		vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
	} else {
		p->regval = vcpu_cp14(vcpu, r->reg);
	}

	return true;
}

/* AArch32 debug register mappings
 *
 * AArch32 DBGBVRn is mapped to DBGBVRn_EL1[31:0]
 * AArch32 DBGBXVRn is mapped to DBGBVRn_EL1[63:32]
 *
 * All control registers and watchpoint value registers are mapped to
 * the lower 32 bits of their AArch64 equivalents. We share the trap
 * handlers with the above AArch64 code which checks what mode the
 * system is in.
 */

static bool trap_xvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];

	if (p->is_write) {
		u64 val = *dbg_reg;

		val &= 0xffffffffUL;
		val |= p->regval << 32;
		*dbg_reg = val;

		vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
	} else {
		p->regval = *dbg_reg >> 32;
	}

	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);

	return true;
}

#define DBG_BCR_BVR_WCR_WVR(n)						\
	/* DBGBVRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_bvr, NULL, n },	\
	/* DBGBCRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_bcr, NULL, n },	\
	/* DBGWVRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_wvr, NULL, n },	\
	/* DBGWCRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_wcr, NULL, n }

#define DBGBXVR(n)							\
	{ Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_xvr, NULL, n }

/*
 * Trapped cp14 registers. We generally ignore most of the external
 * debug, on the principle that they don't really make sense to a
 * guest. Revisit this one day, should this principle change.
 */
static const struct sys_reg_desc cp14_regs[] = {
	/* DBGIDR */
	{ Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgidr },
	/* DBGDTRRXext */
	{ Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi },

	DBG_BCR_BVR_WCR_WVR(0),
	/* DBGDSCRint */
	{ Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(1),
	/* DBGDCCINT */
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug32 },
	/* DBGDSCRext */
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug32 },
	DBG_BCR_BVR_WCR_WVR(2),
	/* DBGDTR[RT]Xint */
	{ Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi },
	/* DBGDTR[RT]Xext */
	{ Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(3),
	DBG_BCR_BVR_WCR_WVR(4),
	DBG_BCR_BVR_WCR_WVR(5),
	/* DBGWFAR */
	{ Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi },
	/* DBGOSECCR */
	{ Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(6),
	/* DBGVCR */
	{ Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug32 },
	DBG_BCR_BVR_WCR_WVR(7),
	DBG_BCR_BVR_WCR_WVR(8),
	DBG_BCR_BVR_WCR_WVR(9),
	DBG_BCR_BVR_WCR_WVR(10),
	DBG_BCR_BVR_WCR_WVR(11),
	DBG_BCR_BVR_WCR_WVR(12),
	DBG_BCR_BVR_WCR_WVR(13),
	DBG_BCR_BVR_WCR_WVR(14),
	DBG_BCR_BVR_WCR_WVR(15),

	/* DBGDRAR (32bit) */
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi },

	DBGBXVR(0),
	/* DBGOSLAR */
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_raz_wi },
	DBGBXVR(1),
	/* DBGOSLSR */
	{ Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1 },
	DBGBXVR(2),
	DBGBXVR(3),
	/* DBGOSDLR */
	{ Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi },
	DBGBXVR(4),
	/* DBGPRCR */
	{ Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi },
	DBGBXVR(5),
	DBGBXVR(6),
	DBGBXVR(7),
	DBGBXVR(8),
	DBGBXVR(9),
	DBGBXVR(10),
	DBGBXVR(11),
	DBGBXVR(12),
	DBGBXVR(13),
	DBGBXVR(14),
	DBGBXVR(15),

	/* DBGDSAR (32bit) */
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi },

	/* DBGDEVID2 */
	{ Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi },
	/* DBGDEVID1 */
	{ Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi },
	/* DBGDEVID */
	{ Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi },
	/* DBGCLAIMSET */
	{ Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi },
	/* DBGCLAIMCLR */
	{ Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi },
	/* DBGAUTHSTATUS */
	{ Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 },
};

/* Trapped cp14 64bit registers */
static const struct sys_reg_desc cp14_64_regs[] = {
	/* DBGDRAR (64bit) */
	{ Op1( 0), CRm( 1), .access = trap_raz_wi },

	/* DBGDSAR (64bit) */
	{ Op1( 0), CRm( 2), .access = trap_raz_wi },
};

/*
 * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
 * depending on the way they are accessed (as a 32bit or a 64bit
 * register).
 */
static const struct sys_reg_desc cp15_regs[] = {
	{ Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi },

	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, c1_SCTLR },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR },
	{ Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, c3_DACR },
	{ Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, c5_DFSR },
	{ Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, c5_IFSR },
	{ Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, c5_ADFSR },
	{ Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, c5_AIFSR },
	{ Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, c6_DFAR },
	{ Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, c6_IFAR },

	/*
	 * DC{C,I,CI}SW operations:
	 */
	{ Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
	{ Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
	{ Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },

	/* PMU */
	{ Op1( 0), CRn( 9), CRm(12), Op2( 0), access_pmcr },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 1), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 2), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 3), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 5), access_pmselr },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 6), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 7), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 0), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 1), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 2), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 0), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 1), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 2), trap_raz_wi },

	{ Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, c10_PRRR },
	{ Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR },
	{ Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, c10_AMAIR0 },
	{ Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 },

	/* ICC_SRE */
	{ Op1( 0), CRn(12), CRm(12), Op2( 5), trap_raz_wi },

	{ Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },
};

static const struct sys_reg_desc cp15_64_regs[] = {
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
	{ Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi },
	{ Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 },
};

/* Target specific emulation tables */
static struct kvm_sys_reg_target_table *target_tables[KVM_ARM_NUM_TARGETS];

void kvm_register_target_sys_reg_table(unsigned int target,
				       struct kvm_sys_reg_target_table *table)
{
	target_tables[target] = table;
}

/* Get specific register table for this target. */
static const struct sys_reg_desc *get_target_table(unsigned target,
						   bool mode_is_64,
						   size_t *num)
{
	struct kvm_sys_reg_target_table *table;

	table = target_tables[target];
	if (mode_is_64) {
		*num = table->table64.num;
		return table->table64.table;
	} else {
		*num = table->table32.num;
		return table->table32.table;
	}
}

static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params,
					   const struct sys_reg_desc table[],
					   unsigned int num)
{
	unsigned int i;

	for (i = 0; i < num; i++) {
		const struct sys_reg_desc *r = &table[i];

		if (params->Op0 != r->Op0)
			continue;
		if (params->Op1 != r->Op1)
			continue;
		if (params->CRn != r->CRn)
			continue;
		if (params->CRm != r->CRm)
			continue;
		if (params->Op2 != r->Op2)
			continue;

		return r;
	}
	return NULL;
}

int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

/*
 * emulate_cp -- tries to match a sys_reg access in a handling table, and
 * calls the corresponding trap handler.
 *
 * @params: pointer to the descriptor of the access
 * @table: array of trap descriptors
 * @num: size of the trap descriptor array
 *
 * Return 0 if the access has been handled, and -1 if not.
 */
static int emulate_cp(struct kvm_vcpu *vcpu,
		      struct sys_reg_params *params,
		      const struct sys_reg_desc *table,
		      size_t num)
{
	const struct sys_reg_desc *r;

	if (!table)
		return -1;	/* Not handled */

	r = find_reg(params, table, num);

	if (r) {
		/*
		 * Not having an accessor means that we have
		 * configured a trap that we don't know how to
		 * handle. This certainly qualifies as a gross bug
		 * that should be fixed right away.
		 */
		BUG_ON(!r->access);

		if (likely(r->access(vcpu, params, r))) {
			/* Skip instruction, since it was emulated */
			kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
			/* Handled */
			return 0;
		}
	}

	/* Not handled */
	return -1;
}

static void unhandled_cp_access(struct kvm_vcpu *vcpu,
				struct sys_reg_params *params)
{
	u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
	int cp;

	switch (hsr_ec) {
	case ESR_ELx_EC_CP15_32:
	case ESR_ELx_EC_CP15_64:
		cp = 15;
		break;
	case ESR_ELx_EC_CP14_MR:
	case ESR_ELx_EC_CP14_64:
		cp = 14;
		break;
	default:
		WARN_ON((cp = -1));
	}

	kvm_err("Unsupported guest CP%d access at: %08lx\n",
		cp, *vcpu_pc(vcpu));
	print_sys_reg_instr(params);
	kvm_inject_undefined(vcpu);
}

/**
 * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP14/CP15 access
 * @vcpu: The VCPU pointer
 * @run: The kvm_run struct
 */
static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
			    const struct sys_reg_desc *global,
			    size_t nr_global,
			    const struct sys_reg_desc *target_specific,
			    size_t nr_specific)
{
	struct sys_reg_params params;
	u32 hsr = kvm_vcpu_get_hsr(vcpu);
	int Rt = (hsr >> 5) & 0xf;
	int Rt2 = (hsr >> 10) & 0xf;

	params.is_aarch32 = true;
	params.is_32bit = false;
	params.CRm = (hsr >> 1) & 0xf;
	params.is_write = ((hsr & 1) == 0);

	params.Op0 = 0;
	params.Op1 = (hsr >> 16) & 0xf;
	params.Op2 = 0;
	params.CRn = 0;

	/*
	 * Make a 64-bit value out of Rt and Rt2. As we use the same trap
	 * backends between AArch32 and AArch64, we get away with it.
	 */
	if (params.is_write) {
		params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff;
		params.regval |= vcpu_get_reg(vcpu, Rt2) << 32;
	}

	if (!emulate_cp(vcpu, &params, target_specific, nr_specific))
		goto out;
	if (!emulate_cp(vcpu, &params, global, nr_global))
		goto out;

	unhandled_cp_access(vcpu, &params);

out:
	/* Split up the value between registers for the read side */
	if (!params.is_write) {
		vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval));
		vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval));
	}

	return 1;
}

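/*
 * Worked example (illustrative): for an mcrr to a 64-bit cp15 register
 * with Rt = r2 and Rt2 = r3, the guest's r2 supplies bits [31:0] and
 * r3 bits [63:32] of params.regval; on the mrrc (read) side the result
 * is split back into the two registers the same way.
 */
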
/**
 * kvm_handle_cp_32 -- handles a mrc/mcr trap on a guest CP14/CP15 access
 * @vcpu: The VCPU pointer
 * @run: The kvm_run struct
 */
static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
			    const struct sys_reg_desc *global,
			    size_t nr_global,
			    const struct sys_reg_desc *target_specific,
			    size_t nr_specific)
{
	struct sys_reg_params params;
	u32 hsr = kvm_vcpu_get_hsr(vcpu);
	int Rt = (hsr >> 5) & 0xf;

	params.is_aarch32 = true;
	params.is_32bit = true;
	params.CRm = (hsr >> 1) & 0xf;
	params.regval = vcpu_get_reg(vcpu, Rt);
	params.is_write = ((hsr & 1) == 0);
	params.CRn = (hsr >> 10) & 0xf;
	params.Op0 = 0;
	params.Op1 = (hsr >> 14) & 0x7;
	params.Op2 = (hsr >> 17) & 0x7;

	if (!emulate_cp(vcpu, &params, target_specific, nr_specific) ||
	    !emulate_cp(vcpu, &params, global, nr_global)) {
		if (!params.is_write)
			vcpu_set_reg(vcpu, Rt, params.regval);
		return 1;
	}

	unhandled_cp_access(vcpu, &params);
	return 1;
}

int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	const struct sys_reg_desc *target_specific;
	size_t num;

	target_specific = get_target_table(vcpu->arch.target, false, &num);
	return kvm_handle_cp_64(vcpu,
				cp15_64_regs, ARRAY_SIZE(cp15_64_regs),
				target_specific, num);
}

int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	const struct sys_reg_desc *target_specific;
	size_t num;

	target_specific = get_target_table(vcpu->arch.target, false, &num);
	return kvm_handle_cp_32(vcpu,
				cp15_regs, ARRAY_SIZE(cp15_regs),
				target_specific, num);
}

int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	return kvm_handle_cp_64(vcpu,
				cp14_64_regs, ARRAY_SIZE(cp14_64_regs),
				NULL, 0);
}

int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	return kvm_handle_cp_32(vcpu,
				cp14_regs, ARRAY_SIZE(cp14_regs),
				NULL, 0);
}

static int emulate_sys_reg(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *params)
{
	size_t num;
	const struct sys_reg_desc *table, *r;

	table = get_target_table(vcpu->arch.target, true, &num);

	/* Search target-specific then generic table. */
	r = find_reg(params, table, num);
	if (!r)
		r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	if (likely(r)) {
		/*
		 * Not having an accessor means that we have
		 * configured a trap that we don't know how to
		 * handle. This certainly qualifies as a gross bug
		 * that should be fixed right away.
		 */
		BUG_ON(!r->access);

		if (likely(r->access(vcpu, params, r))) {
			/* Skip instruction, since it was emulated */
			kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
			return 1;
		}
		/* If access function fails, it should complain. */
	} else {
		kvm_err("Unsupported guest sys_reg access at: %lx\n",
			*vcpu_pc(vcpu));
		print_sys_reg_instr(params);
	}
	kvm_inject_undefined(vcpu);
	return 1;
}

static void reset_sys_reg_descs(struct kvm_vcpu *vcpu,
				const struct sys_reg_desc *table, size_t num)
{
	unsigned long i;

	for (i = 0; i < num; i++)
		if (table[i].reset)
			table[i].reset(vcpu, &table[i]);
}

/**
 * kvm_handle_sys_reg -- handles a mrs/msr trap on a guest sys_reg access
 * @vcpu: The VCPU pointer
 * @run: The kvm_run struct
 */
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct sys_reg_params params;
	unsigned long esr = kvm_vcpu_get_hsr(vcpu);
	int Rt = (esr >> 5) & 0x1f;
	int ret;

	trace_kvm_handle_sys_reg(esr);

	params.is_aarch32 = false;
	params.is_32bit = false;
	params.Op0 = (esr >> 20) & 3;
	params.Op1 = (esr >> 14) & 0x7;
	params.CRn = (esr >> 10) & 0xf;
	params.CRm = (esr >> 1) & 0xf;
	params.Op2 = (esr >> 17) & 0x7;
	params.regval = vcpu_get_reg(vcpu, Rt);
	params.is_write = !(esr & 1);

	ret = emulate_sys_reg(vcpu, &params);

	if (!params.is_write)
		vcpu_set_reg(vcpu, Rt, params.regval);
	return ret;
}

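/*
 * Worked example (illustrative): a guest "mrs x2, sctlr_el1" traps with
 * an ESR_EL2 ISS encoding Op0=3, Op1=0, CRn=1, CRm=0, Op2=0, Rt=2 and
 * Direction=1 (read), so the code above builds params accordingly and,
 * once emulate_sys_reg() has run the SCTLR_EL1 handler, writes the
 * emulated value back into the guest's x2.
 */
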
/******************************************************************************
 * Userspace API
 *****************************************************************************/

static bool index_to_params(u64 id, struct sys_reg_params *params)
{
	switch (id & KVM_REG_SIZE_MASK) {
	case KVM_REG_SIZE_U64:
		/* Any unused index bits means it's not valid. */
		if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
			      | KVM_REG_ARM_COPROC_MASK
			      | KVM_REG_ARM64_SYSREG_OP0_MASK
			      | KVM_REG_ARM64_SYSREG_OP1_MASK
			      | KVM_REG_ARM64_SYSREG_CRN_MASK
			      | KVM_REG_ARM64_SYSREG_CRM_MASK
			      | KVM_REG_ARM64_SYSREG_OP2_MASK))
			return false;
		params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP0_SHIFT);
		params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP1_SHIFT);
		params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK)
			       >> KVM_REG_ARM64_SYSREG_CRN_SHIFT);
		params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK)
			       >> KVM_REG_ARM64_SYSREG_CRM_SHIFT);
		params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP2_SHIFT);
		return true;
	default:
		return false;
	}
}

/* Decode an index value, and find the sys_reg_desc entry. */
static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
						    u64 id)
{
	size_t num;
	const struct sys_reg_desc *table, *r;
	struct sys_reg_params params;

	/* We only do sys_reg for now. */
	if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
		return NULL;

	if (!index_to_params(id, &params))
		return NULL;

	table = get_target_table(vcpu->arch.target, true, &num);
	r = find_reg(&params, table, num);
	if (!r)
		r = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	/* Not saved in the sys_reg array? */
	if (r && !r->reg)
		r = NULL;

	return r;
}

/*
 * These are the invariant sys_reg registers: we let the guest see the
 * host versions of these, so they're part of the guest state.
 *
 * A future CPU may provide a mechanism to present different values to
 * the guest, or a future kvm may trap them.
 */

#define FUNCTION_INVARIANT(reg)						\
	static void get_##reg(struct kvm_vcpu *v,			\
			      const struct sys_reg_desc *r)		\
	{								\
		u64 val;						\
									\
		asm volatile("mrs %0, " __stringify(reg) "\n"		\
			     : "=r" (val));				\
		((struct sys_reg_desc *)r)->val = val;			\
	}

FUNCTION_INVARIANT(midr_el1)
FUNCTION_INVARIANT(ctr_el0)
FUNCTION_INVARIANT(revidr_el1)
FUNCTION_INVARIANT(id_pfr0_el1)
FUNCTION_INVARIANT(id_pfr1_el1)
FUNCTION_INVARIANT(id_dfr0_el1)
FUNCTION_INVARIANT(id_afr0_el1)
FUNCTION_INVARIANT(id_mmfr0_el1)
FUNCTION_INVARIANT(id_mmfr1_el1)
FUNCTION_INVARIANT(id_mmfr2_el1)
FUNCTION_INVARIANT(id_mmfr3_el1)
FUNCTION_INVARIANT(id_isar0_el1)
FUNCTION_INVARIANT(id_isar1_el1)
FUNCTION_INVARIANT(id_isar2_el1)
FUNCTION_INVARIANT(id_isar3_el1)
FUNCTION_INVARIANT(id_isar4_el1)
FUNCTION_INVARIANT(id_isar5_el1)
FUNCTION_INVARIANT(clidr_el1)
FUNCTION_INVARIANT(aidr_el1)

/* ->val is filled in by kvm_sys_reg_table_init() */
static struct sys_reg_desc invariant_sys_regs[] = {
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b000),
	  NULL, get_midr_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b110),
	  NULL, get_revidr_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b000),
	  NULL, get_id_pfr0_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b001),
	  NULL, get_id_pfr1_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b010),
	  NULL, get_id_dfr0_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b011),
	  NULL, get_id_afr0_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b100),
	  NULL, get_id_mmfr0_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b101),
	  NULL, get_id_mmfr1_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b110),
	  NULL, get_id_mmfr2_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b111),
	  NULL, get_id_mmfr3_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b000),
	  NULL, get_id_isar0_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b001),
	  NULL, get_id_isar1_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b010),
	  NULL, get_id_isar2_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b011),
	  NULL, get_id_isar3_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b100),
	  NULL, get_id_isar4_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b101),
	  NULL, get_id_isar5_el1 },
	{ Op0(0b11), Op1(0b001), CRn(0b0000), CRm(0b0000), Op2(0b001),
	  NULL, get_clidr_el1 },
	{ Op0(0b11), Op1(0b001), CRn(0b0000), CRm(0b0000), Op2(0b111),
	  NULL, get_aidr_el1 },
	{ Op0(0b11), Op1(0b011), CRn(0b0000), CRm(0b0000), Op2(0b001),
	  NULL, get_ctr_el0 },
};

static int reg_from_user(u64 *val, const void __user *uaddr, u64 id)
{
	if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
		return -EFAULT;
	return 0;
}

static int reg_to_user(void __user *uaddr, const u64 *val, u64 id)
{
	if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_invariant_sys_reg(u64 id, void __user *uaddr)
{
	struct sys_reg_params params;
	const struct sys_reg_desc *r;

	if (!index_to_params(id, &params))
		return -ENOENT;

	r = find_reg(&params, invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs));
	if (!r)
		return -ENOENT;

	return reg_to_user(uaddr, &r->val, id);
}

static int set_invariant_sys_reg(u64 id, void __user *uaddr)
{
	struct sys_reg_params params;
	const struct sys_reg_desc *r;
	int err;
	u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */

	if (!index_to_params(id, &params))
		return -ENOENT;
	r = find_reg(&params, invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs));
	if (!r)
		return -ENOENT;

	err = reg_from_user(&val, uaddr, id);
	if (err)
		return err;

	/* This is what we mean by invariant: you can't change it. */
	if (r->val != val)
		return -EINVAL;

	return 0;
}

static bool is_valid_cache(u32 val)
{
	u32 level, ctype;

	if (val >= CSSELR_MAX)
		return false;

	/* Bottom bit is Instruction or Data bit. Next 3 bits are level. */
	level = (val >> 1);
	ctype = (cache_levels >> (level * 3)) & 7;

	switch (ctype) {
	case 0: /* No cache */
		return false;
	case 1: /* Instruction cache only */
		return (val & 1);
	case 2: /* Data cache only */
	case 4: /* Unified cache */
		return !(val & 1);
	case 3: /* Separate instruction and data caches */
		return true;
	default: /* Reserved: we can't know instruction or data. */
		return false;
	}
}

static int demux_c15_get(u64 id, void __user *uaddr)
{
	u32 val;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (!is_valid_cache(val))
			return -ENOENT;

		return put_user(get_ccsidr(val), uval);
	default:
		return -ENOENT;
	}
}

static int demux_c15_set(u64 id, void __user *uaddr)
{
	u32 val, newval;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (!is_valid_cache(val))
			return -ENOENT;

		if (get_user(newval, uval))
			return -EFAULT;

		/* This is also invariant: you can't change it. */
		if (newval != get_ccsidr(val))
			return -EINVAL;
		return 0;
	default:
		return -ENOENT;
	}
}

int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	const struct sys_reg_desc *r;
	void __user *uaddr = (void __user *)(unsigned long)reg->addr;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_get(reg->id, uaddr);

	if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
		return -ENOENT;

	r = index_to_sys_reg_desc(vcpu, reg->id);
	if (!r)
		return get_invariant_sys_reg(reg->id, uaddr);

	if (r->get_user)
		return (r->get_user)(vcpu, r, reg, uaddr);

	return reg_to_user(uaddr, &vcpu_sys_reg(vcpu, r->reg), reg->id);
}

int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	const struct sys_reg_desc *r;
	void __user *uaddr = (void __user *)(unsigned long)reg->addr;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_set(reg->id, uaddr);

	if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
		return -ENOENT;

	r = index_to_sys_reg_desc(vcpu, reg->id);
	if (!r)
		return set_invariant_sys_reg(reg->id, uaddr);

	if (r->set_user)
		return (r->set_user)(vcpu, r, reg, uaddr);

	return reg_from_user(&vcpu_sys_reg(vcpu, r->reg), uaddr, reg->id);
}

static unsigned int num_demux_regs(void)
{
	unsigned int i, count = 0;

	for (i = 0; i < CSSELR_MAX; i++)
		if (is_valid_cache(i))
			count++;

	return count;
}

static int write_demux_regids(u64 __user *uindices)
{
	u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
	unsigned int i;

	val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
	for (i = 0; i < CSSELR_MAX; i++) {
		if (!is_valid_cache(i))
			continue;
		if (put_user(val | i, uindices))
			return -EFAULT;
		uindices++;
	}
	return 0;
}

static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
{
	return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
		KVM_REG_ARM64_SYSREG |
		(reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
		(reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
		(reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
		(reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
		(reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT));
}

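/*
 * Worked example (illustrative): for SCTLR_EL1 (Op0=3, Op1=0, CRn=1,
 * CRm=0, Op2=0), and assuming the usual shift values (Op0 at 14, Op1
 * at 11, CRn at 7, CRm at 3, Op2 at 0), this computes an index that
 * userspace can feed straight to KVM_GET_ONE_REG, roughly:
 *
 *	__u64 val;
 *	struct kvm_one_reg one_reg = {
 *		.id   = 0x603000000013c080,	// assumed SCTLR_EL1 index
 *		.addr = (__u64)&val,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &one_reg);
 */
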
static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
{
	if (!*uind)
		return true;

	if (put_user(sys_reg_to_index(reg), *uind))
		return false;

	(*uind)++;
	return true;
}

/* Assumed ordered tables, see kvm_sys_reg_table_init. */
static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
{
	const struct sys_reg_desc *i1, *i2, *end1, *end2;
	unsigned int total = 0;
	size_t num;

	/* We check for duplicates here, to allow arch-specific overrides. */
	i1 = get_target_table(vcpu->arch.target, true, &num);
	end1 = i1 + num;
	i2 = sys_reg_descs;
	end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs);

	BUG_ON(i1 == end1 || i2 == end2);

	/* Walk carefully, as both tables may refer to the same register. */
	while (i1 || i2) {
		int cmp = cmp_sys_reg(i1, i2);
		/* target-specific overrides generic entry. */
		if (cmp <= 0) {
			/* Ignore registers we trap but don't save. */
			if (i1->reg) {
				if (!copy_reg_to_user(i1, &uind))
					return -EFAULT;
				total++;
			}
		} else {
			/* Ignore registers we trap but don't save. */
			if (i2->reg) {
				if (!copy_reg_to_user(i2, &uind))
					return -EFAULT;
				total++;
			}
		}

		if (cmp <= 0 && ++i1 == end1)
			i1 = NULL;
		if (cmp >= 0 && ++i2 == end2)
			i2 = NULL;
	}
	return total;
}

unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
{
	return ARRAY_SIZE(invariant_sys_regs)
		+ num_demux_regs()
		+ walk_sys_regs(vcpu, (u64 __user *)NULL);
}

int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	unsigned int i;
	int err;

	/* Then give them all the invariant registers' indices. */
	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) {
		if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices))
			return -EFAULT;
		uindices++;
	}

	err = walk_sys_regs(vcpu, uindices);
	if (err < 0)
		return err;
	uindices += err;

	return write_demux_regids(uindices);
}

static int check_sysreg_table(const struct sys_reg_desc *table, unsigned int n)
{
	unsigned int i;

	for (i = 1; i < n; i++) {
		if (cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
			kvm_err("sys_reg table %p out of order (%d)\n", table, i - 1);
			return 1;
		}
	}

	return 0;
}

void kvm_sys_reg_table_init(void)
{
	unsigned int i;
	struct sys_reg_desc clidr;

	/* Make sure tables are unique and in order. */
	BUG_ON(check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs)));
	BUG_ON(check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs)));
	BUG_ON(check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs)));
	BUG_ON(check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs)));
	BUG_ON(check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs)));
	BUG_ON(check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs)));

	/* We abuse the reset function to overwrite the table itself. */
	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
		invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]);

	/*
	 * CLIDR format is awkward, so clean it up. See ARM B4.1.20:
	 *
	 *   If software reads the Cache Type fields from Ctype1
	 *   upwards, once it has seen a value of 0b000, no caches
	 *   exist at further-out levels of the hierarchy. So, for
	 *   example, if Ctype3 is the first Cache Type field with a
	 *   value of 0b000, the values of Ctype4 to Ctype7 must be
	 *   ignored.
	 */
	get_clidr_el1(NULL, &clidr); /* Ugly... */
	cache_levels = clidr.val;
	for (i = 0; i < 7; i++)
		if (((cache_levels >> (i*3)) & 7) == 0)
			break;
	/* Clear all higher bits. */
	cache_levels &= (1 << (i*3))-1;
}

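/*
 * Worked example (illustrative): a CPU reporting Ctype1 = 0b011
 * (separate I+D), Ctype2 = 0b100 (unified) and Ctype3 = 0b000 ends up
 * with cache_levels == 0x23: the scan above stops at i == 2, and the
 * mask (1 << 6) - 1 clears every Ctype field beyond level 2.
 */
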
/**
 * kvm_reset_sys_regs - sets system registers to reset value
 * @vcpu: The VCPU pointer
 *
 * This function finds the right table above and sets the registers on the
 * virtual CPU struct to their architecturally defined reset values.
 */
void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
{
	size_t num;
	const struct sys_reg_desc *table;

	/* Catch someone adding a register without putting in reset entry. */
	memset(&vcpu->arch.ctxt.sys_regs, 0x42, sizeof(vcpu->arch.ctxt.sys_regs));

	/* Generic chip reset first (so target could override). */
	reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	table = get_target_table(vcpu->arch.target, true, &num);
	reset_sys_reg_descs(vcpu, table, num);

	for (num = 1; num < NR_SYS_REGS; num++)
		if (vcpu_sys_reg(vcpu, num) == 0x4242424242424242)
			panic("Didn't reset vcpu_sys_reg(%zi)", num);
}