/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/coproc.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Authors: Rusty Russell <rusty@rustcorp.com.au>
 *          Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/bsearch.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/debug-monitors.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_host.h>
#include <asm/kvm_mmu.h>
#include <asm/perf_event.h>
#include <asm/sysreg.h>

#include <trace/events/kvm.h>

#include "sys_regs.h"

#include "trace.h"
/*
 * All of this file is extremely similar to the ARM coproc.c, but the
 * types are different. My gut feeling is that it should be pretty
 * easy to merge, but that would be an ABI breakage -- again. VFP
 * would also need to be abstracted.
 *
 * For AArch32, we only take care of what is being trapped. Anything
 * that has to do with init and userspace access has to go via the
 * 64bit interface.
 */
static bool read_from_write_only(struct kvm_vcpu *vcpu,
				 struct sys_reg_params *params,
				 const struct sys_reg_desc *r)
{
	WARN_ONCE(1, "Unexpected sys_reg read to write-only register\n");
	print_sys_reg_instr(params);
	kvm_inject_undefined(vcpu);
	return false;
}
/* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
static u32 cache_levels;
/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
#define CSSELR_MAX 12

/* Which cache CCSIDR represents depends on CSSELR value. */
static u32 get_ccsidr(u32 csselr)
{
	u32 ccsidr;

	/* Make sure no one else changes CSSELR during this! */
	local_irq_disable();
	write_sysreg(csselr, csselr_el1);
	isb();
	ccsidr = read_sysreg(ccsidr_el1);
	local_irq_enable();

	return ccsidr;
}
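/*
 * Note on the sequence above: CSSELR selects which cache the next
 * CCSIDR read describes, so the write/read pair must not be
 * interrupted by anything that could reprogram CSSELR; interrupts are
 * disabled around it and the ISB orders the CSSELR write before the
 * CCSIDR read.
 */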
/*
 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
 */
static bool access_dcsw(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	kvm_set_way_flush(vcpu);
	return true;
}
/*
 * Generic accessor for VM registers. Only called as long as HCR_TVM
 * is set. If the guest enables the MMU, we stop trapping the VM
 * sys_regs and leave it in complete control of the caches.
 */
static bool access_vm_reg(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	bool was_enabled = vcpu_has_cache_enabled(vcpu);

	BUG_ON(!p->is_write);

	if (!p->is_aarch32) {
		vcpu_sys_reg(vcpu, r->reg) = p->regval;
	} else {
		if (!p->is_32bit)
			vcpu_cp15_64_high(vcpu, r->reg) = upper_32_bits(p->regval);
		vcpu_cp15_64_low(vcpu, r->reg) = lower_32_bits(p->regval);
	}

	kvm_toggle_cache(vcpu, was_enabled);
	return true;
}
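/*
 * Worked example for access_vm_reg(): a guest write to SCTLR_EL1 that
 * flips the cache-enable bits is detected by comparing was_enabled
 * (sampled before the write lands in the shadow register) with the new
 * state inside kvm_toggle_cache(), which can then flush and stop or
 * restart trapping the VM registers accordingly.
 */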
/*
 * Trap handler for the GICv3 SGI generation system register.
 * Forward the request to the VGIC emulation.
 * The cp15_64 code makes sure this automatically works
 * for both AArch64 and AArch32 accesses.
 */
static bool access_gic_sgi(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	vgic_v3_dispatch_sgi(vcpu, p->regval);

	return true;
}
static bool access_gic_sre(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);

	p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
	return true;
}
static bool trap_raz_wi(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);
	else
		return read_zero(vcpu, p);
}
static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		p->regval = (1 << 3);
		return true;
	}
}
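/*
 * The (1 << 3) value returned above sets OSLSR_EL1.OSLM[1] while
 * leaving OSLM[0] and OSLK clear, i.e. (assuming the architected
 * OSLSR_EL1 layout) we advertise an implemented OS Lock that is
 * currently unlocked.
 */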
static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
				   struct sys_reg_params *p,
				   const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		p->regval = read_sysreg(dbgauthstatus_el1);
		return true;
	}
}
/*
 * We want to avoid world-switching all the DBG registers all the
 * time:
 *
 * - If we've touched any debug register, it is likely that we're
 *   going to touch more of them. It then makes sense to disable the
 *   traps and start doing the save/restore dance
 * - If debug is active (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), it is
 *   then mandatory to save/restore the registers, as the guest
 *   depends on them.
 *
 * For this, we use a DIRTY bit, indicating the guest has modified the
 * debug registers, used as follows:
 *
 * On guest entry:
 * - If the dirty bit is set (because we're coming back from trapping),
 *   disable the traps, save host registers, restore guest registers.
 * - If debug is actively in use (DBG_MDSCR_KDE or DBG_MDSCR_MDE set),
 *   set the dirty bit, disable the traps, save host registers,
 *   restore guest registers.
 * - Otherwise, enable the traps
 *
 * On guest exit:
 * - If the dirty bit is set, save guest registers, restore host
 *   registers and clear the dirty bit. This ensures that the host can
 *   now use the debug registers.
 */
static bool trap_debug_regs(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *p,
			    const struct sys_reg_desc *r)
{
	if (p->is_write) {
		vcpu_sys_reg(vcpu, r->reg) = p->regval;
		vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
	} else {
		p->regval = vcpu_sys_reg(vcpu, r->reg);
	}

	trace_trap_reg(__func__, r->reg, p->is_write, p->regval);

	return true;
}
/*
 * reg_to_dbg/dbg_to_reg
 *
 * A 32 bit write to a debug register leaves the top bits alone
 * A 32 bit read from a debug register only returns the bottom bits
 *
 * All writes will set the KVM_ARM64_DEBUG_DIRTY flag to ensure the
 * hyp.S code switches between host and guest values in future.
 */
static void reg_to_dbg(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       u64 *dbg_reg)
{
	u64 val = p->regval;

	if (p->is_32bit) {
		val &= 0xffffffffUL;
		val |= ((*dbg_reg >> 32) << 32);
	}

	*dbg_reg = val;
	vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
}

static void dbg_to_reg(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       u64 *dbg_reg)
{
	p->regval = *dbg_reg;
	if (p->is_32bit)
		p->regval &= 0xffffffffUL;
}
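/*
 * Example of the 32-bit handling above: with *dbg_reg holding
 * 0xaaaabbbb_ccccdddd, a 32-bit write of 0x1234 yields
 * 0xaaaabbbb_00001234 (top bits preserved), while a 32-bit read
 * returns just 0xccccdddd.
 */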
static bool trap_bvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];

	if (p->is_write)
		reg_to_dbg(vcpu, p, dbg_reg);
	else
		dbg_to_reg(vcpu, p, dbg_reg);

	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);

	return true;
}

static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static void reset_bvr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg] = rd->val;
}
static bool trap_bcr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];

	if (p->is_write)
		reg_to_dbg(vcpu, p, dbg_reg);
	else
		dbg_to_reg(vcpu, p, dbg_reg);

	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);

	return true;
}

static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static void reset_bcr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg] = rd->val;
}
static bool trap_wvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];

	if (p->is_write)
		reg_to_dbg(vcpu, p, dbg_reg);
	else
		dbg_to_reg(vcpu, p, dbg_reg);

	trace_trap_reg(__func__, rd->reg, p->is_write,
		vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]);

	return true;
}

static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static void reset_wvr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg] = rd->val;
}
static bool trap_wcr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];

	if (p->is_write)
		reg_to_dbg(vcpu, p, dbg_reg);
	else
		dbg_to_reg(vcpu, p, dbg_reg);

	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);

	return true;
}

static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static void reset_wcr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg] = rd->val;
}
static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	vcpu_sys_reg(vcpu, AMAIR_EL1) = read_sysreg(amair_el1);
}
static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 mpidr;

	/*
	 * Map the vcpu_id into the first three affinity level fields of
	 * the MPIDR. We limit the number of VCPUs in level 0 due to a
	 * limitation to 16 CPUs in that level in the ICC_SGIxR registers
	 * of the GICv3 to be able to address each CPU directly when
	 * sending IPIs.
	 */
	mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0);
	mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1);
	mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2);
	vcpu_sys_reg(vcpu, MPIDR_EL1) = (1ULL << 31) | mpidr;
}
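/*
 * Example of the packing above: vcpu_id 0x1234 yields Aff0 = 0x4,
 * Aff1 = 0x23, Aff2 = 0x1, and bit 31 (RES1) is always set in the
 * resulting MPIDR_EL1 value.
 */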
static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 pmcr, val;

	pmcr = read_sysreg(pmcr_el0);
	/*
	 * Writable bits of PMCR_EL0 (ARMV8_PMU_PMCR_MASK) are reset to UNKNOWN
	 * except PMCR.E resetting to zero.
	 */
	val = ((pmcr & ~ARMV8_PMU_PMCR_MASK)
	       | (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E);
	vcpu_sys_reg(vcpu, PMCR_EL0) = val;
}
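/*
 * 0xdecafbad above is just an arbitrary junk pattern: ANDed with
 * ARMV8_PMU_PMCR_MASK it supplies the UNKNOWN reset values for the
 * writable bits, while the host's read-only bits (e.g. PMCR.N) are
 * preserved and PMCR.E is forced to zero.
 */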
static bool pmu_access_el0_disabled(struct kvm_vcpu *vcpu)
{
	u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0);

	return !((reg & ARMV8_PMU_USERENR_EN) || vcpu_mode_priv(vcpu));
}

static bool pmu_write_swinc_el0_disabled(struct kvm_vcpu *vcpu)
{
	u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0);

	return !((reg & (ARMV8_PMU_USERENR_SW | ARMV8_PMU_USERENR_EN))
		 || vcpu_mode_priv(vcpu));
}

static bool pmu_access_cycle_counter_el0_disabled(struct kvm_vcpu *vcpu)
{
	u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0);

	return !((reg & (ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_EN))
		 || vcpu_mode_priv(vcpu));
}

static bool pmu_access_event_counter_el0_disabled(struct kvm_vcpu *vcpu)
{
	u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0);

	return !((reg & (ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_EN))
		 || vcpu_mode_priv(vcpu));
}
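/*
 * All four helpers above implement the PMUSERENR_EL0 gating rule: an
 * access is disabled only when the vcpu is at EL0 and neither the
 * global EN bit nor the access-specific bit (SW, CR or ER) is set;
 * privileged mode always passes.
 */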
static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	u64 val;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (p->is_write) {
		/* Only update writeable bits of PMCR */
		val = vcpu_sys_reg(vcpu, PMCR_EL0);
		val &= ~ARMV8_PMU_PMCR_MASK;
		val |= p->regval & ARMV8_PMU_PMCR_MASK;
		vcpu_sys_reg(vcpu, PMCR_EL0) = val;
		kvm_pmu_handle_pmcr(vcpu, val);
	} else {
		/* PMCR.P & PMCR.C are RAZ */
		val = vcpu_sys_reg(vcpu, PMCR_EL0)
		      & ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C);
		p->regval = val;
	}

	return true;
}
static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_event_counter_el0_disabled(vcpu))
		return false;

	if (p->is_write)
		vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval;
	else
		/* return PMSELR.SEL field */
		p->regval = vcpu_sys_reg(vcpu, PMSELR_EL0)
			    & ARMV8_PMU_COUNTER_MASK;

	return true;
}
static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u64 pmceid;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	BUG_ON(p->is_write);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (!(p->Op2 & 1))
		pmceid = read_sysreg(pmceid0_el0);
	else
		pmceid = read_sysreg(pmceid1_el0);

	p->regval = pmceid;

	return true;
}
static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
{
	u64 pmcr, val;

	pmcr = vcpu_sys_reg(vcpu, PMCR_EL0);
	val = (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
	if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX)
		return false;

	return true;
}
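/*
 * PMCR_EL0.N (extracted above) is the number of implemented event
 * counters, so valid indices are 0..N-1 plus ARMV8_PMU_CYCLE_IDX for
 * the dedicated cycle counter, which exists regardless of N.
 */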
static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
			      struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	u64 idx;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (r->CRn == 9 && r->CRm == 13) {
		if (r->Op2 == 2) {
			/* PMXEVCNTR_EL0 */
			if (pmu_access_event_counter_el0_disabled(vcpu))
				return false;

			idx = vcpu_sys_reg(vcpu, PMSELR_EL0)
			      & ARMV8_PMU_COUNTER_MASK;
		} else if (r->Op2 == 0) {
			/* PMCCNTR_EL0 */
			if (pmu_access_cycle_counter_el0_disabled(vcpu))
				return false;

			idx = ARMV8_PMU_CYCLE_IDX;
		} else {
			return false;
		}
	} else if (r->CRn == 0 && r->CRm == 9) {
		/* PMCCNTR */
		if (pmu_access_event_counter_el0_disabled(vcpu))
			return false;

		idx = ARMV8_PMU_CYCLE_IDX;
	} else if (r->CRn == 14 && (r->CRm & 12) == 8) {
		/* PMEVCNTRn_EL0 */
		if (pmu_access_event_counter_el0_disabled(vcpu))
			return false;

		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
	} else {
		return false;
	}

	if (!pmu_counter_idx_valid(vcpu, idx))
		return false;

	if (p->is_write) {
		if (pmu_access_el0_disabled(vcpu))
			return false;

		kvm_pmu_set_counter_value(vcpu, idx, p->regval);
	} else {
		p->regval = kvm_pmu_get_counter_value(vcpu, idx);
	}

	return true;
}
static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			       const struct sys_reg_desc *r)
{
	u64 idx, reg;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {
		/* PMXEVTYPER_EL0 */
		idx = vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK;
		reg = PMEVTYPER0_EL0 + idx;
	} else if (r->CRn == 14 && (r->CRm & 12) == 12) {
		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
		if (idx == ARMV8_PMU_CYCLE_IDX)
			reg = PMCCFILTR_EL0;
		else
			/* PMEVTYPERn_EL0 */
			reg = PMEVTYPER0_EL0 + idx;
	} else {
		BUG();
	}

	if (!pmu_counter_idx_valid(vcpu, idx))
		return false;

	if (p->is_write) {
		kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
		vcpu_sys_reg(vcpu, reg) = p->regval & ARMV8_PMU_EVTYPE_MASK;
	} else {
		p->regval = vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK;
	}

	return true;
}
static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 val, mask;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	mask = kvm_pmu_valid_counter_mask(vcpu);
	if (p->is_write) {
		val = p->regval & mask;
		if (r->Op2 & 0x1) {
			/* accessing PMCNTENSET_EL0 */
			vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
			kvm_pmu_enable_counter(vcpu, val);
		} else {
			/* accessing PMCNTENCLR_EL0 */
			vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
			kvm_pmu_disable_counter(vcpu, val);
		}
	} else {
		p->regval = vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask;
	}

	return true;
}
static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 mask = kvm_pmu_valid_counter_mask(vcpu);

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (!vcpu_mode_priv(vcpu))
		return false;

	if (p->is_write) {
		u64 val = p->regval & mask;

		if (r->Op2 & 0x1)
			/* accessing PMINTENSET_EL1 */
			vcpu_sys_reg(vcpu, PMINTENSET_EL1) |= val;
		else
			/* accessing PMINTENCLR_EL1 */
			vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val;
	} else {
		p->regval = vcpu_sys_reg(vcpu, PMINTENSET_EL1) & mask;
	}

	return true;
}
static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	u64 mask = kvm_pmu_valid_counter_mask(vcpu);

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (p->is_write) {
		if (r->CRm & 0x2)
			/* accessing PMOVSSET_EL0 */
			kvm_pmu_overflow_set(vcpu, p->regval & mask);
		else
			/* accessing PMOVSCLR_EL0 */
			vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask);
	} else {
		p->regval = vcpu_sys_reg(vcpu, PMOVSSET_EL0) & mask;
	}

	return true;
}
static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 mask;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_write_swinc_el0_disabled(vcpu))
		return false;

	if (p->is_write) {
		mask = kvm_pmu_valid_counter_mask(vcpu);
		kvm_pmu_software_increment(vcpu, p->regval & mask);
		return true;
	}

	return false;
}
static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			     const struct sys_reg_desc *r)
{
	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (p->is_write) {
		if (!vcpu_mode_priv(vcpu))
			return false;

		vcpu_sys_reg(vcpu, PMUSERENR_EL0) = p->regval
						    & ARMV8_PMU_USERENR_MASK;
	} else {
		p->regval = vcpu_sys_reg(vcpu, PMUSERENR_EL0)
			    & ARMV8_PMU_USERENR_MASK;
	}

	return true;
}
/* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
#define DBG_BCR_BVR_WCR_WVR_EL1(n)					\
	/* DBGBVRn_EL1 */						\
	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b100),	\
	  trap_bvr, reset_bvr, n, 0, get_bvr, set_bvr },		\
	/* DBGBCRn_EL1 */						\
	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b101),	\
	  trap_bcr, reset_bcr, n, 0, get_bcr, set_bcr },		\
	/* DBGWVRn_EL1 */						\
	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b110),	\
	  trap_wvr, reset_wvr, n, 0, get_wvr, set_wvr },		\
	/* DBGWCRn_EL1 */						\
	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b111),	\
	  trap_wcr, reset_wcr, n, 0, get_wcr, set_wcr }

/* Macro to expand the PMEVCNTRn_EL0 register */
#define PMU_PMEVCNTR_EL0(n)						\
	/* PMEVCNTRn_EL0 */						\
	{ Op0(0b11), Op1(0b011), CRn(0b1110),				\
	  CRm((0b1000 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),		\
	  access_pmu_evcntr, reset_unknown, (PMEVCNTR0_EL0 + n), }

/* Macro to expand the PMEVTYPERn_EL0 register */
#define PMU_PMEVTYPER_EL0(n)						\
	/* PMEVTYPERn_EL0 */						\
	{ Op0(0b11), Op1(0b011), CRn(0b1110),				\
	  CRm((0b1100 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),		\
	  access_pmu_evtyper, reset_unknown, (PMEVTYPER0_EL0 + n), }
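/*
 * Expansion example: PMU_PMEVCNTR_EL0(10) yields CRm(0b1001) and
 * Op2(0b010), i.e. the architected encoding of PMEVCNTR10_EL0, backed
 * by the PMEVCNTR0_EL0 + 10 slot in the shadow sys_reg file.
 */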
/*
 * Architected system registers.
 * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
 *
 * Debug handling: We trap most, if not all, debug-related system
 * registers. The implementation is good enough to ensure that a guest
 * can use these with minimal performance degradation. The drawback is
 * that we don't implement any of the external debug architecture, nor
 * the OSLock protocol. This should be revisited if we ever encounter a
 * more demanding guest...
 */
static const struct sys_reg_desc sys_reg_descs[] = {
	/* DC ISW */
	{ Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b0110), Op2(0b010),
	  access_dcsw },
	/* DC CSW */
	{ Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1010), Op2(0b010),
	  access_dcsw },
	/* DC CISW */
	{ Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b010),
	  access_dcsw },

	DBG_BCR_BVR_WCR_WVR_EL1(0),
	DBG_BCR_BVR_WCR_WVR_EL1(1),
	/* MDCCINT_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b000),
	  trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
	/* MDSCR_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b010),
	  trap_debug_regs, reset_val, MDSCR_EL1, 0 },
	DBG_BCR_BVR_WCR_WVR_EL1(2),
	DBG_BCR_BVR_WCR_WVR_EL1(3),
	DBG_BCR_BVR_WCR_WVR_EL1(4),
	DBG_BCR_BVR_WCR_WVR_EL1(5),
	DBG_BCR_BVR_WCR_WVR_EL1(6),
	DBG_BCR_BVR_WCR_WVR_EL1(7),
	DBG_BCR_BVR_WCR_WVR_EL1(8),
	DBG_BCR_BVR_WCR_WVR_EL1(9),
	DBG_BCR_BVR_WCR_WVR_EL1(10),
	DBG_BCR_BVR_WCR_WVR_EL1(11),
	DBG_BCR_BVR_WCR_WVR_EL1(12),
	DBG_BCR_BVR_WCR_WVR_EL1(13),
	DBG_BCR_BVR_WCR_WVR_EL1(14),
	DBG_BCR_BVR_WCR_WVR_EL1(15),

	/* MDRAR_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000),
	  trap_raz_wi },
	/* OSLAR_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b100),
	  trap_raz_wi },
	/* OSLSR_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0001), Op2(0b100),
	  trap_oslsr_el1 },
	/* OSDLR_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0011), Op2(0b100),
	  trap_raz_wi },
	/* DBGPRCR_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0100), Op2(0b100),
	  trap_raz_wi },
	/* DBGCLAIMSET_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1000), Op2(0b110),
	  trap_raz_wi },
	/* DBGCLAIMCLR_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1001), Op2(0b110),
	  trap_raz_wi },
	/* DBGAUTHSTATUS_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b110),
	  trap_dbgauthstatus_el1 },

	/* MDCCSR_EL1 */
	{ Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0001), Op2(0b000),
	  trap_raz_wi },
	/* DBGDTR_EL0 */
	{ Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0100), Op2(0b000),
	  trap_raz_wi },
	/* DBGDTR[TR]X_EL0 */
	{ Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0101), Op2(0b000),
	  trap_raz_wi },

	/* DBGVCR32_EL2 */
	{ Op0(0b10), Op1(0b100), CRn(0b0000), CRm(0b0111), Op2(0b000),
	  NULL, reset_val, DBGVCR32_EL2, 0 },

	/* MPIDR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b101),
	  NULL, reset_mpidr, MPIDR_EL1 },
	/* SCTLR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000),
	  access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
	/* CPACR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b010),
	  NULL, reset_val, CPACR_EL1, 0 },
	/* TTBR0_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b000),
	  access_vm_reg, reset_unknown, TTBR0_EL1 },
	/* TTBR1_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b001),
	  access_vm_reg, reset_unknown, TTBR1_EL1 },
	/* TCR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b010),
	  access_vm_reg, reset_val, TCR_EL1, 0 },

	/* AFSR0_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b000),
	  access_vm_reg, reset_unknown, AFSR0_EL1 },
	/* AFSR1_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b001),
	  access_vm_reg, reset_unknown, AFSR1_EL1 },
	/* ESR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0010), Op2(0b000),
	  access_vm_reg, reset_unknown, ESR_EL1 },
	/* FAR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0110), CRm(0b0000), Op2(0b000),
	  access_vm_reg, reset_unknown, FAR_EL1 },
	/* PAR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0111), CRm(0b0100), Op2(0b000),
	  NULL, reset_unknown, PAR_EL1 },

	/* PMINTENSET_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b001),
	  access_pminten, reset_unknown, PMINTENSET_EL1 },
	/* PMINTENCLR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b010),
	  access_pminten, NULL, PMINTENSET_EL1 },

	/* MAIR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0010), Op2(0b000),
	  access_vm_reg, reset_unknown, MAIR_EL1 },
	/* AMAIR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0011), Op2(0b000),
	  access_vm_reg, reset_amair_el1, AMAIR_EL1 },

	/* VBAR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b0000), Op2(0b000),
	  NULL, reset_val, VBAR_EL1, 0 },

	/* ICC_EOIR0_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1000), Op2(0b001),
	  read_from_write_only },
	/* ICC_DIR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1011), Op2(0b001),
	  read_from_write_only },
	/* ICC_SGI1R_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1011), Op2(0b101),
	  access_gic_sgi },
	/* ICC_EOIR1_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1100), Op2(0b001),
	  read_from_write_only },
	/* ICC_SRE_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1100), Op2(0b101),
	  access_gic_sre },

	/* CONTEXTIDR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b001),
	  access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
	/* TPIDR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b100),
	  NULL, reset_unknown, TPIDR_EL1 },

	/* CNTKCTL_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1110), CRm(0b0001), Op2(0b000),
	  NULL, reset_val, CNTKCTL_EL1, 0},
	/* CSSELR_EL1 */
	{ Op0(0b11), Op1(0b010), CRn(0b0000), CRm(0b0000), Op2(0b000),
	  NULL, reset_unknown, CSSELR_EL1 },

	/* PMCR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b000),
	  access_pmcr, reset_pmcr, },
	/* PMCNTENSET_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b001),
	  access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
	/* PMCNTENCLR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b010),
	  access_pmcnten, NULL, PMCNTENSET_EL0 },
	/* PMOVSCLR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b011),
	  access_pmovs, NULL, PMOVSSET_EL0 },
	/* PMSWINC_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b100),
	  access_pmswinc, reset_unknown, PMSWINC_EL0 },
	/* PMSELR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b101),
	  access_pmselr, reset_unknown, PMSELR_EL0 },
	/* PMCEID0_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b110),
	  access_pmceid },
	/* PMCEID1_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b111),
	  access_pmceid },
	/* PMCCNTR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b000),
	  access_pmu_evcntr, reset_unknown, PMCCNTR_EL0 },
	/* PMXEVTYPER_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b001),
	  access_pmu_evtyper },
	/* PMXEVCNTR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b010),
	  access_pmu_evcntr },
	/*
	 * PMUSERENR_EL0 resets as unknown in 64bit mode while it resets as
	 * zero in 32bit mode. Here we choose to reset it as zero for
	 * consistency.
	 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b000),
	  access_pmuserenr, reset_val, PMUSERENR_EL0, 0 },
	/* PMOVSSET_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b011),
	  access_pmovs, reset_unknown, PMOVSSET_EL0 },

	/* TPIDR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b010),
	  NULL, reset_unknown, TPIDR_EL0 },
	/* TPIDRRO_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b011),
	  NULL, reset_unknown, TPIDRRO_EL0 },
	/* PMEVCNTRn_EL0 */
	PMU_PMEVCNTR_EL0(0),
	PMU_PMEVCNTR_EL0(1),
	PMU_PMEVCNTR_EL0(2),
	PMU_PMEVCNTR_EL0(3),
	PMU_PMEVCNTR_EL0(4),
	PMU_PMEVCNTR_EL0(5),
	PMU_PMEVCNTR_EL0(6),
	PMU_PMEVCNTR_EL0(7),
	PMU_PMEVCNTR_EL0(8),
	PMU_PMEVCNTR_EL0(9),
	PMU_PMEVCNTR_EL0(10),
	PMU_PMEVCNTR_EL0(11),
	PMU_PMEVCNTR_EL0(12),
	PMU_PMEVCNTR_EL0(13),
	PMU_PMEVCNTR_EL0(14),
	PMU_PMEVCNTR_EL0(15),
	PMU_PMEVCNTR_EL0(16),
	PMU_PMEVCNTR_EL0(17),
	PMU_PMEVCNTR_EL0(18),
	PMU_PMEVCNTR_EL0(19),
	PMU_PMEVCNTR_EL0(20),
	PMU_PMEVCNTR_EL0(21),
	PMU_PMEVCNTR_EL0(22),
	PMU_PMEVCNTR_EL0(23),
	PMU_PMEVCNTR_EL0(24),
	PMU_PMEVCNTR_EL0(25),
	PMU_PMEVCNTR_EL0(26),
	PMU_PMEVCNTR_EL0(27),
	PMU_PMEVCNTR_EL0(28),
	PMU_PMEVCNTR_EL0(29),
	PMU_PMEVCNTR_EL0(30),
	/* PMEVTYPERn_EL0 */
	PMU_PMEVTYPER_EL0(0),
	PMU_PMEVTYPER_EL0(1),
	PMU_PMEVTYPER_EL0(2),
	PMU_PMEVTYPER_EL0(3),
	PMU_PMEVTYPER_EL0(4),
	PMU_PMEVTYPER_EL0(5),
	PMU_PMEVTYPER_EL0(6),
	PMU_PMEVTYPER_EL0(7),
	PMU_PMEVTYPER_EL0(8),
	PMU_PMEVTYPER_EL0(9),
	PMU_PMEVTYPER_EL0(10),
	PMU_PMEVTYPER_EL0(11),
	PMU_PMEVTYPER_EL0(12),
	PMU_PMEVTYPER_EL0(13),
	PMU_PMEVTYPER_EL0(14),
	PMU_PMEVTYPER_EL0(15),
	PMU_PMEVTYPER_EL0(16),
	PMU_PMEVTYPER_EL0(17),
	PMU_PMEVTYPER_EL0(18),
	PMU_PMEVTYPER_EL0(19),
	PMU_PMEVTYPER_EL0(20),
	PMU_PMEVTYPER_EL0(21),
	PMU_PMEVTYPER_EL0(22),
	PMU_PMEVTYPER_EL0(23),
	PMU_PMEVTYPER_EL0(24),
	PMU_PMEVTYPER_EL0(25),
	PMU_PMEVTYPER_EL0(26),
	PMU_PMEVTYPER_EL0(27),
	PMU_PMEVTYPER_EL0(28),
	PMU_PMEVTYPER_EL0(29),
	PMU_PMEVTYPER_EL0(30),
	/*
	 * PMCCFILTR_EL0 resets as unknown in 64bit mode while it resets as
	 * zero in 32bit mode. Here we choose to reset it as zero for
	 * consistency.
	 */
	{ Op0(0b11), Op1(0b011), CRn(0b1110), CRm(0b1111), Op2(0b111),
	  access_pmu_evtyper, reset_val, PMCCFILTR_EL0, 0 },

	/* DACR32_EL2 */
	{ Op0(0b11), Op1(0b100), CRn(0b0011), CRm(0b0000), Op2(0b000),
	  NULL, reset_unknown, DACR32_EL2 },
	/* IFSR32_EL2 */
	{ Op0(0b11), Op1(0b100), CRn(0b0101), CRm(0b0000), Op2(0b001),
	  NULL, reset_unknown, IFSR32_EL2 },
	/* FPEXC32_EL2 */
	{ Op0(0b11), Op1(0b100), CRn(0b0101), CRm(0b0011), Op2(0b000),
	  NULL, reset_val, FPEXC32_EL2, 0x70 },
};
static bool trap_dbgidr(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		u64 dfr = read_system_reg(SYS_ID_AA64DFR0_EL1);
		u64 pfr = read_system_reg(SYS_ID_AA64PFR0_EL1);
		u32 el3 = !!cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR0_EL3_SHIFT);

		p->regval = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) |
			     (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) |
			     (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20)
			     | (6 << 16) | (el3 << 14) | (el3 << 12));
		return true;
	}
}
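/*
 * The synthesized DBGDIDR above maps ID_AA64DFR0_EL1's WRPs/BRPs/
 * CTX_CMPs fields into bits [31:28]/[27:24]/[23:20] and reports debug
 * architecture version 6 (ARMv8) in bits [19:16]; assuming the usual
 * DBGDIDR layout, the two el3-derived bits mirror EL3 presence into
 * the nSUHD_imp and SE_imp fields.
 */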
static bool trap_debug32(struct kvm_vcpu *vcpu,
			 struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	if (p->is_write) {
		vcpu_cp14(vcpu, r->reg) = p->regval;
		vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
	} else {
		p->regval = vcpu_cp14(vcpu, r->reg);
	}

	return true;
}
/* AArch32 debug register mappings
 *
 * AArch32 DBGBVRn is mapped to DBGBVRn_EL1[31:0]
 * AArch32 DBGBXVRn is mapped to DBGBVRn_EL1[63:32]
 *
 * All control registers and watchpoint value registers are mapped to
 * the lower 32 bits of their AArch64 equivalents. We share the trap
 * handlers with the above AArch64 code which checks what mode the
 * system is in.
 */
static bool trap_xvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];

	if (p->is_write) {
		u64 val = *dbg_reg;

		val &= 0xffffffffUL;
		val |= p->regval << 32;
		*dbg_reg = val;

		vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
	} else {
		p->regval = *dbg_reg >> 32;
	}

	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);

	return true;
}
#define DBG_BCR_BVR_WCR_WVR(n)						\
	/* DBGBVRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_bvr, NULL, n },	\
	/* DBGBCRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_bcr, NULL, n },	\
	/* DBGWVRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_wvr, NULL, n },	\
	/* DBGWCRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_wcr, NULL, n }

#define DBGBXVR(n)							\
	{ Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_xvr, NULL, n }
/*
 * Trapped cp14 registers. We generally ignore most of the external
 * debug, on the principle that they don't really make sense to a
 * guest. Revisit this some day, should this principle change.
 */
static const struct sys_reg_desc cp14_regs[] = {
	/* DBGIDR */
	{ Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgidr },
	/* DBGDTRRXext */
	{ Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi },

	DBG_BCR_BVR_WCR_WVR(0),
	/* DBGDSCRint */
	{ Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(1),
	/* DBGDCCINT */
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug32 },
	/* DBGDSCRext */
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug32 },
	DBG_BCR_BVR_WCR_WVR(2),
	/* DBGDTR[RT]Xint */
	{ Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi },
	/* DBGDTR[RT]Xext */
	{ Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(3),
	DBG_BCR_BVR_WCR_WVR(4),
	DBG_BCR_BVR_WCR_WVR(5),
	/* DBGWFAR */
	{ Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi },
	/* DBGOSECCR */
	{ Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(6),
	/* DBGVCR */
	{ Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug32 },
	DBG_BCR_BVR_WCR_WVR(7),
	DBG_BCR_BVR_WCR_WVR(8),
	DBG_BCR_BVR_WCR_WVR(9),
	DBG_BCR_BVR_WCR_WVR(10),
	DBG_BCR_BVR_WCR_WVR(11),
	DBG_BCR_BVR_WCR_WVR(12),
	DBG_BCR_BVR_WCR_WVR(13),
	DBG_BCR_BVR_WCR_WVR(14),
	DBG_BCR_BVR_WCR_WVR(15),

	/* DBGDRAR (32bit) */
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi },

	DBGBXVR(0),
	/* DBGOSLAR */
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_raz_wi },
	DBGBXVR(1),
	/* DBGOSLSR */
	{ Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1 },
	DBGBXVR(2),
	DBGBXVR(3),
	/* DBGOSDLR */
	{ Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi },
	DBGBXVR(4),
	/* DBGPRCR */
	{ Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi },
	DBGBXVR(5),
	DBGBXVR(6),
	DBGBXVR(7),
	DBGBXVR(8),
	DBGBXVR(9),
	DBGBXVR(10),
	DBGBXVR(11),
	DBGBXVR(12),
	DBGBXVR(13),
	DBGBXVR(14),
	DBGBXVR(15),

	/* DBGDSAR (32bit) */
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi },

	/* DBGDEVID2 */
	{ Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi },
	/* DBGDEVID1 */
	{ Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi },
	/* DBGDEVID */
	{ Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi },
	/* DBGCLAIMSET */
	{ Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi },
	/* DBGCLAIMCLR */
	{ Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi },
	/* DBGAUTHSTATUS */
	{ Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 },
};
/* Trapped cp14 64bit registers */
static const struct sys_reg_desc cp14_64_regs[] = {
	/* DBGDRAR (64bit) */
	{ Op1( 0), CRm( 1), .access = trap_raz_wi },

	/* DBGDSAR (64bit) */
	{ Op1( 0), CRm( 2), .access = trap_raz_wi },
};
/* Macro to expand the PMEVCNTRn register */
#define PMU_PMEVCNTR(n)							\
	/* PMEVCNTRn */							\
	{ Op1(0), CRn(0b1110),						\
	  CRm((0b1000 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),		\
	  access_pmu_evcntr }

/* Macro to expand the PMEVTYPERn register */
#define PMU_PMEVTYPER(n)						\
	/* PMEVTYPERn */						\
	{ Op1(0), CRn(0b1110),						\
	  CRm((0b1100 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),		\
	  access_pmu_evtyper }
/*
 * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
 * depending on the way they are accessed (as a 32bit or a 64bit
 * register).
 */
static const struct sys_reg_desc cp15_regs[] = {
	{ Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi },

	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, c1_SCTLR },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR },
	{ Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, c3_DACR },
	{ Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, c5_DFSR },
	{ Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, c5_IFSR },
	{ Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, c5_ADFSR },
	{ Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, c5_AIFSR },
	{ Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, c6_DFAR },
	{ Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, c6_IFAR },

	/*
	 * DC{C,I,CI}SW operations:
	 */
	{ Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
	{ Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
	{ Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },

	/* PMU */
	{ Op1( 0), CRn( 9), CRm(12), Op2( 0), access_pmcr },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 1), access_pmcnten },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 2), access_pmcnten },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 3), access_pmovs },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 4), access_pmswinc },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 5), access_pmselr },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 6), access_pmceid },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 7), access_pmceid },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 0), access_pmu_evcntr },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 1), access_pmu_evtyper },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 2), access_pmu_evcntr },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 0), access_pmuserenr },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 1), access_pminten },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 2), access_pminten },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 3), access_pmovs },

	{ Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, c10_PRRR },
	{ Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR },
	{ Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, c10_AMAIR0 },
	{ Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 },

	/* ICC_SRE */
	{ Op1( 0), CRn(12), CRm(12), Op2( 5), access_gic_sre },

	{ Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },

	/* PMEVCNTRn */
	PMU_PMEVCNTR(0),
	PMU_PMEVCNTR(1),
	PMU_PMEVCNTR(2),
	PMU_PMEVCNTR(3),
	PMU_PMEVCNTR(4),
	PMU_PMEVCNTR(5),
	PMU_PMEVCNTR(6),
	PMU_PMEVCNTR(7),
	PMU_PMEVCNTR(8),
	PMU_PMEVCNTR(9),
	PMU_PMEVCNTR(10),
	PMU_PMEVCNTR(11),
	PMU_PMEVCNTR(12),
	PMU_PMEVCNTR(13),
	PMU_PMEVCNTR(14),
	PMU_PMEVCNTR(15),
	PMU_PMEVCNTR(16),
	PMU_PMEVCNTR(17),
	PMU_PMEVCNTR(18),
	PMU_PMEVCNTR(19),
	PMU_PMEVCNTR(20),
	PMU_PMEVCNTR(21),
	PMU_PMEVCNTR(22),
	PMU_PMEVCNTR(23),
	PMU_PMEVCNTR(24),
	PMU_PMEVCNTR(25),
	PMU_PMEVCNTR(26),
	PMU_PMEVCNTR(27),
	PMU_PMEVCNTR(28),
	PMU_PMEVCNTR(29),
	PMU_PMEVCNTR(30),
	/* PMEVTYPERn */
	PMU_PMEVTYPER(0),
	PMU_PMEVTYPER(1),
	PMU_PMEVTYPER(2),
	PMU_PMEVTYPER(3),
	PMU_PMEVTYPER(4),
	PMU_PMEVTYPER(5),
	PMU_PMEVTYPER(6),
	PMU_PMEVTYPER(7),
	PMU_PMEVTYPER(8),
	PMU_PMEVTYPER(9),
	PMU_PMEVTYPER(10),
	PMU_PMEVTYPER(11),
	PMU_PMEVTYPER(12),
	PMU_PMEVTYPER(13),
	PMU_PMEVTYPER(14),
	PMU_PMEVTYPER(15),
	PMU_PMEVTYPER(16),
	PMU_PMEVTYPER(17),
	PMU_PMEVTYPER(18),
	PMU_PMEVTYPER(19),
	PMU_PMEVTYPER(20),
	PMU_PMEVTYPER(21),
	PMU_PMEVTYPER(22),
	PMU_PMEVTYPER(23),
	PMU_PMEVTYPER(24),
	PMU_PMEVTYPER(25),
	PMU_PMEVTYPER(26),
	PMU_PMEVTYPER(27),
	PMU_PMEVTYPER(28),
	PMU_PMEVTYPER(29),
	PMU_PMEVTYPER(30),
	/* PMCCFILTR */
	{ Op1(0), CRn(14), CRm(15), Op2(7), access_pmu_evtyper },
};
static const struct sys_reg_desc cp15_64_regs[] = {
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
	{ Op1( 0), CRn( 0), CRm( 9), Op2( 0), access_pmu_evcntr },
	{ Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi },
	{ Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 },
};
/* Target specific emulation tables */
static struct kvm_sys_reg_target_table *target_tables[KVM_ARM_NUM_TARGETS];

void kvm_register_target_sys_reg_table(unsigned int target,
				       struct kvm_sys_reg_target_table *table)
{
	target_tables[target] = table;
}

/* Get specific register table for this target. */
static const struct sys_reg_desc *get_target_table(unsigned target,
						   bool mode_is_64,
						   size_t *num)
{
	struct kvm_sys_reg_target_table *table;

	table = target_tables[target];
	if (mode_is_64) {
		*num = table->table64.num;
		return table->table64.table;
	} else {
		*num = table->table32.num;
		return table->table32.table;
	}
}
#define reg_to_match_value(x)						\
	({								\
		unsigned long val;					\
		val  = (x)->Op0 << 14;					\
		val |= (x)->Op1 << 11;					\
		val |= (x)->CRn << 7;					\
		val |= (x)->CRm << 3;					\
		val |= (x)->Op2;					\
		val;							\
	 })
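/*
 * The packed key above places Op2 in bits [2:0], CRm in [6:3], CRn in
 * [10:7], Op1 in [13:11] and Op0 from bit 14 up, which is exactly the
 * Op0,Op1,CRn,CRm,Op2 sort order the tables are kept in, so bsearch()
 * can work directly on it.
 */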
static int match_sys_reg(const void *key, const void *elt)
{
	const unsigned long pval = (unsigned long)key;
	const struct sys_reg_desc *r = elt;

	return pval - reg_to_match_value(r);
}
static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params,
					   const struct sys_reg_desc table[],
					   unsigned int num)
{
	unsigned long pval = reg_to_match_value(params);

	return bsearch((void *)pval, table, num, sizeof(table[0]), match_sys_reg);
}
int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	return 1;
}
/*
 * emulate_cp --  tries to match a sys_reg access in a handling table, and
 *                call the corresponding trap handler.
 *
 * @params: pointer to the descriptor of the access
 * @table: array of trap descriptors
 * @num: size of the trap descriptor array
 *
 * Return 0 if the access has been handled, and -1 if not.
 */
static int emulate_cp(struct kvm_vcpu *vcpu,
		      struct sys_reg_params *params,
		      const struct sys_reg_desc *table,
		      size_t num)
{
	const struct sys_reg_desc *r;

	if (!table)
		return -1;	/* Not handled */

	r = find_reg(params, table, num);

	if (r) {
		/*
		 * Not having an accessor means that we have
		 * configured a trap that we don't know how to
		 * handle. This certainly qualifies as a gross bug
		 * that should be fixed right away.
		 */
		BUG_ON(!r->access);

		if (likely(r->access(vcpu, params, r))) {
			/* Skip instruction, since it was emulated */
			kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
		}

		/* Handled */
		return 0;
	}

	/* Not handled */
	return -1;
}
static void unhandled_cp_access(struct kvm_vcpu *vcpu,
				struct sys_reg_params *params)
{
	u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
	int cp = -1;

	switch (hsr_ec) {
	case ESR_ELx_EC_CP15_32:
	case ESR_ELx_EC_CP15_64:
		cp = 15;
		break;
	case ESR_ELx_EC_CP14_MR:
	case ESR_ELx_EC_CP14_64:
		cp = 14;
		break;
	default:
		WARN_ON(1);
	}

	kvm_err("Unsupported guest CP%d access at: %08lx\n",
		cp, *vcpu_pc(vcpu));
	print_sys_reg_instr(params);
	kvm_inject_undefined(vcpu);
}
/**
 * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP14/CP15 access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
			    const struct sys_reg_desc *global,
			    size_t nr_global,
			    const struct sys_reg_desc *target_specific,
			    size_t nr_specific)
{
	struct sys_reg_params params;
	u32 hsr = kvm_vcpu_get_hsr(vcpu);
	int Rt = kvm_vcpu_sys_get_rt(vcpu);
	int Rt2 = (hsr >> 10) & 0x1f;

	params.is_aarch32 = true;
	params.is_32bit = false;
	params.CRm = (hsr >> 1) & 0xf;
	params.is_write = ((hsr & 1) == 0);

	params.Op0 = 0;
	params.Op1 = (hsr >> 16) & 0xf;
	params.Op2 = 0;
	params.CRn = 0;

	/*
	 * Make a 64-bit value out of Rt and Rt2. As we use the same trap
	 * backends between AArch32 and AArch64, we get away with it.
	 */
	if (params.is_write) {
		params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff;
		params.regval |= vcpu_get_reg(vcpu, Rt2) << 32;
	}

	if (!emulate_cp(vcpu, &params, target_specific, nr_specific))
		goto out;
	if (!emulate_cp(vcpu, &params, global, nr_global))
		goto out;

	unhandled_cp_access(vcpu, &params);

out:
	/* Split up the value between registers for the read side */
	if (!params.is_write) {
		vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval));
		vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval));
	}

	return 1;
}
/**
 * kvm_handle_cp_32 -- handles a mrc/mcr trap on a guest CP14/CP15 access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
			    const struct sys_reg_desc *global,
			    size_t nr_global,
			    const struct sys_reg_desc *target_specific,
			    size_t nr_specific)
{
	struct sys_reg_params params;
	u32 hsr = kvm_vcpu_get_hsr(vcpu);
	int Rt  = kvm_vcpu_sys_get_rt(vcpu);

	params.is_aarch32 = true;
	params.is_32bit = true;
	params.CRm = (hsr >> 1) & 0xf;
	params.regval = vcpu_get_reg(vcpu, Rt);
	params.is_write = ((hsr & 1) == 0);
	params.CRn = (hsr >> 10) & 0xf;
	params.Op0 = 0;
	params.Op1 = (hsr >> 14) & 0x7;
	params.Op2 = (hsr >> 17) & 0x7;

	if (!emulate_cp(vcpu, &params, target_specific, nr_specific) ||
	    !emulate_cp(vcpu, &params, global, nr_global)) {
		if (!params.is_write)
			vcpu_set_reg(vcpu, Rt, params.regval);
		return 1;
	}

	unhandled_cp_access(vcpu, &params);
	return 1;
}
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	const struct sys_reg_desc *target_specific;
	size_t num;

	target_specific = get_target_table(vcpu->arch.target, false, &num);
	return kvm_handle_cp_64(vcpu,
				cp15_64_regs, ARRAY_SIZE(cp15_64_regs),
				target_specific, num);
}

int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	const struct sys_reg_desc *target_specific;
	size_t num;

	target_specific = get_target_table(vcpu->arch.target, false, &num);
	return kvm_handle_cp_32(vcpu,
				cp15_regs, ARRAY_SIZE(cp15_regs),
				target_specific, num);
}

int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	return kvm_handle_cp_64(vcpu,
				cp14_64_regs, ARRAY_SIZE(cp14_64_regs),
				NULL, 0);
}

int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	return kvm_handle_cp_32(vcpu,
				cp14_regs, ARRAY_SIZE(cp14_regs),
				NULL, 0);
}
static int emulate_sys_reg(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *params)
{
	size_t num;
	const struct sys_reg_desc *table, *r;

	table = get_target_table(vcpu->arch.target, true, &num);

	/* Search target-specific then generic table. */
	r = find_reg(params, table, num);
	if (!r)
		r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	if (likely(r)) {
		/*
		 * Not having an accessor means that we have
		 * configured a trap that we don't know how to
		 * handle. This certainly qualifies as a gross bug
		 * that should be fixed right away.
		 */
		BUG_ON(!r->access);

		if (likely(r->access(vcpu, params, r))) {
			/* Skip instruction, since it was emulated */
			kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
			return 1;
		}
		/* If access function fails, it should complain. */
	} else {
		kvm_err("Unsupported guest sys_reg access at: %lx\n",
			*vcpu_pc(vcpu));
		print_sys_reg_instr(params);
	}
	kvm_inject_undefined(vcpu);
	return 1;
}
static void reset_sys_reg_descs(struct kvm_vcpu *vcpu,
				const struct sys_reg_desc *table, size_t num)
{
	unsigned long i;

	for (i = 0; i < num; i++)
		if (table[i].reset)
			table[i].reset(vcpu, &table[i]);
}
/**
 * kvm_handle_sys_reg -- handles a mrs/msr trap on a guest sys_reg access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct sys_reg_params params;
	unsigned long esr = kvm_vcpu_get_hsr(vcpu);
	int Rt = kvm_vcpu_sys_get_rt(vcpu);
	int ret;

	trace_kvm_handle_sys_reg(esr);

	params.is_aarch32 = false;
	params.is_32bit = false;
	params.Op0 = (esr >> 20) & 3;
	params.Op1 = (esr >> 14) & 0x7;
	params.CRn = (esr >> 10) & 0xf;
	params.CRm = (esr >> 1) & 0xf;
	params.Op2 = (esr >> 17) & 0x7;
	params.regval = vcpu_get_reg(vcpu, Rt);
	params.is_write = !(esr & 1);

	ret = emulate_sys_reg(vcpu, &params);

	if (!params.is_write)
		vcpu_set_reg(vcpu, Rt, params.regval);
	return ret;
}
/******************************************************************************
 * Userspace API
 *****************************************************************************/
static bool index_to_params(u64 id, struct sys_reg_params *params)
{
	switch (id & KVM_REG_SIZE_MASK) {
	case KVM_REG_SIZE_U64:
		/* Any unused index bits means it's not valid. */
		if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
			      | KVM_REG_ARM_COPROC_MASK
			      | KVM_REG_ARM64_SYSREG_OP0_MASK
			      | KVM_REG_ARM64_SYSREG_OP1_MASK
			      | KVM_REG_ARM64_SYSREG_CRN_MASK
			      | KVM_REG_ARM64_SYSREG_CRM_MASK
			      | KVM_REG_ARM64_SYSREG_OP2_MASK))
			return false;
		params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP0_SHIFT);
		params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP1_SHIFT);
		params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK)
			       >> KVM_REG_ARM64_SYSREG_CRN_SHIFT);
		params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK)
			       >> KVM_REG_ARM64_SYSREG_CRM_SHIFT);
		params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP2_SHIFT);
		return true;
	default:
		return false;
	}
}
/* Decode an index value, and find the sys_reg_desc entry. */
static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
						    u64 id)
{
	size_t num;
	const struct sys_reg_desc *table, *r;
	struct sys_reg_params params;

	/* We only do sys_reg for now. */
	if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
		return NULL;

	if (!index_to_params(id, &params))
		return NULL;

	table = get_target_table(vcpu->arch.target, true, &num);
	r = find_reg(&params, table, num);
	if (!r)
		r = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	/* Not saved in the sys_reg array? */
	if (r && !r->reg)
		r = NULL;

	return r;
}
/*
 * These are the invariant sys_reg registers: we let the guest see the
 * host versions of these, so they're part of the guest state.
 *
 * A future CPU may provide a mechanism to present different values to
 * the guest, or a future kvm may trap them.
 */

#define FUNCTION_INVARIANT(reg)						\
	static void get_##reg(struct kvm_vcpu *v,			\
			      const struct sys_reg_desc *r)		\
	{								\
		((struct sys_reg_desc *)r)->val = read_sysreg(reg);	\
	}

FUNCTION_INVARIANT(midr_el1)
FUNCTION_INVARIANT(ctr_el0)
FUNCTION_INVARIANT(revidr_el1)
FUNCTION_INVARIANT(id_pfr0_el1)
FUNCTION_INVARIANT(id_pfr1_el1)
FUNCTION_INVARIANT(id_dfr0_el1)
FUNCTION_INVARIANT(id_afr0_el1)
FUNCTION_INVARIANT(id_mmfr0_el1)
FUNCTION_INVARIANT(id_mmfr1_el1)
FUNCTION_INVARIANT(id_mmfr2_el1)
FUNCTION_INVARIANT(id_mmfr3_el1)
FUNCTION_INVARIANT(id_isar0_el1)
FUNCTION_INVARIANT(id_isar1_el1)
FUNCTION_INVARIANT(id_isar2_el1)
FUNCTION_INVARIANT(id_isar3_el1)
FUNCTION_INVARIANT(id_isar4_el1)
FUNCTION_INVARIANT(id_isar5_el1)
FUNCTION_INVARIANT(clidr_el1)
FUNCTION_INVARIANT(aidr_el1)
/* ->val is filled in by kvm_sys_reg_table_init() */
static struct sys_reg_desc invariant_sys_regs[] = {
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b000),
	  NULL, get_midr_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b110),
	  NULL, get_revidr_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b000),
	  NULL, get_id_pfr0_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b001),
	  NULL, get_id_pfr1_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b010),
	  NULL, get_id_dfr0_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b011),
	  NULL, get_id_afr0_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b100),
	  NULL, get_id_mmfr0_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b101),
	  NULL, get_id_mmfr1_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b110),
	  NULL, get_id_mmfr2_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b111),
	  NULL, get_id_mmfr3_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b000),
	  NULL, get_id_isar0_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b001),
	  NULL, get_id_isar1_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b010),
	  NULL, get_id_isar2_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b011),
	  NULL, get_id_isar3_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b100),
	  NULL, get_id_isar4_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b101),
	  NULL, get_id_isar5_el1 },
	{ Op0(0b11), Op1(0b001), CRn(0b0000), CRm(0b0000), Op2(0b001),
	  NULL, get_clidr_el1 },
	{ Op0(0b11), Op1(0b001), CRn(0b0000), CRm(0b0000), Op2(0b111),
	  NULL, get_aidr_el1 },
	{ Op0(0b11), Op1(0b011), CRn(0b0000), CRm(0b0000), Op2(0b001),
	  NULL, get_ctr_el0 },
};
static int reg_from_user(u64 *val, const void __user *uaddr, u64 id)
{
	if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
		return -EFAULT;
	return 0;
}

static int reg_to_user(void __user *uaddr, const u64 *val, u64 id)
{
	if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
		return -EFAULT;
	return 0;
}
static int get_invariant_sys_reg(u64 id, void __user *uaddr)
{
	struct sys_reg_params params;
	const struct sys_reg_desc *r;

	if (!index_to_params(id, &params))
		return -ENOENT;

	r = find_reg(&params, invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs));
	if (!r)
		return -ENOENT;

	return reg_to_user(uaddr, &r->val, id);
}
static int set_invariant_sys_reg(u64 id, void __user *uaddr)
{
	struct sys_reg_params params;
	const struct sys_reg_desc *r;
	int err;
	u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */

	if (!index_to_params(id, &params))
		return -ENOENT;
	r = find_reg(&params, invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs));
	if (!r)
		return -ENOENT;

	err = reg_from_user(&val, uaddr, id);
	if (err)
		return err;

	/* This is what we mean by invariant: you can't change it. */
	if (r->val != val)
		return -EINVAL;

	return 0;
}
static bool is_valid_cache(u32 val)
{
	u32 level, ctype;

	if (val >= CSSELR_MAX)
		return false;

	/* Bottom bit is Instruction or Data bit.  Next 3 bits are level. */
	level = (val >> 1);
	ctype = (cache_levels >> (level * 3)) & 7;

	switch (ctype) {
	case 0: /* No cache */
		return false;
	case 1: /* Instruction cache only */
		return (val & 1);
	case 2: /* Data cache only */
	case 4: /* Unified cache */
		return !(val & 1);
	case 3: /* Separate instruction and data caches */
		return true;
	default: /* Reserved: we can't know instruction or data. */
		return false;
	}
}
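/*
 * Example: CSSELR value 1 (0b001) selects the first cache level with
 * the InD bit set, so it is valid only if CLIDR's Ctype1 field reports
 * an instruction cache (1) or separate I+D caches (3); value 0 selects
 * the data/unified side and is valid for ctype 2, 3 or 4.
 */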
static int demux_c15_get(u64 id, void __user *uaddr)
{
	u32 val;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (!is_valid_cache(val))
			return -ENOENT;

		return put_user(get_ccsidr(val), uval);
	default:
		return -ENOENT;
	}
}
static int demux_c15_set(u64 id, void __user *uaddr)
{
	u32 val, newval;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (!is_valid_cache(val))
			return -ENOENT;

		if (get_user(newval, uval))
			return -EFAULT;

		/* This is also invariant: you can't change it. */
		if (newval != get_ccsidr(val))
			return -EINVAL;
		return 0;
	default:
		return -ENOENT;
	}
}
int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	const struct sys_reg_desc *r;
	void __user *uaddr = (void __user *)(unsigned long)reg->addr;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_get(reg->id, uaddr);

	if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
		return -ENOENT;

	r = index_to_sys_reg_desc(vcpu, reg->id);
	if (!r)
		return get_invariant_sys_reg(reg->id, uaddr);

	if (r->get_user)
		return (r->get_user)(vcpu, r, reg, uaddr);

	return reg_to_user(uaddr, &vcpu_sys_reg(vcpu, r->reg), reg->id);
}
int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	const struct sys_reg_desc *r;
	void __user *uaddr = (void __user *)(unsigned long)reg->addr;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_set(reg->id, uaddr);

	if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
		return -ENOENT;

	r = index_to_sys_reg_desc(vcpu, reg->id);
	if (!r)
		return set_invariant_sys_reg(reg->id, uaddr);

	if (r->set_user)
		return (r->set_user)(vcpu, r, reg, uaddr);

	return reg_from_user(&vcpu_sys_reg(vcpu, r->reg), uaddr, reg->id);
}
static unsigned int num_demux_regs(void)
{
	unsigned int i, count = 0;

	for (i = 0; i < CSSELR_MAX; i++)
		if (is_valid_cache(i))
			count++;

	return count;
}
static int write_demux_regids(u64 __user *uindices)
{
	u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
	unsigned int i;

	val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
	for (i = 0; i < CSSELR_MAX; i++) {
		if (!is_valid_cache(i))
			continue;
		if (put_user(val | i, uindices))
			return -EFAULT;
		uindices++;
	}
	return 0;
}
static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
{
	return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
		KVM_REG_ARM64_SYSREG |
		(reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
		(reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
		(reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
		(reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
		(reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT));
}
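/*
 * sys_reg_to_index() is the inverse of index_to_params(): it packs the
 * Op0/Op1/CRn/CRm/Op2 fields back into a 64-bit KVM_REG_ARM64_SYSREG
 * index, so a round trip through either direction is lossless.
 */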
static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
{
	if (!*uind)
		return true;

	if (put_user(sys_reg_to_index(reg), *uind))
		return false;

	(*uind)++;
	return true;
}
/* Assumed ordered tables, see kvm_sys_reg_table_init. */
static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
{
	const struct sys_reg_desc *i1, *i2, *end1, *end2;
	unsigned int total = 0;
	size_t num;

	/* We check for duplicates here, to allow arch-specific overrides. */
	i1 = get_target_table(vcpu->arch.target, true, &num);
	end1 = i1 + num;
	i2 = sys_reg_descs;
	end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs);

	BUG_ON(i1 == end1 || i2 == end2);

	/* Walk carefully, as both tables may refer to the same register. */
	while (i1 || i2) {
		int cmp = cmp_sys_reg(i1, i2);
		/* target-specific overrides generic entry. */
		if (cmp <= 0) {
			/* Ignore registers we trap but don't save. */
			if (i1->reg) {
				if (!copy_reg_to_user(i1, &uind))
					return -EFAULT;
				total++;
			}
		} else {
			/* Ignore registers we trap but don't save. */
			if (i2->reg) {
				if (!copy_reg_to_user(i2, &uind))
					return -EFAULT;
				total++;
			}
		}

		if (cmp <= 0 && ++i1 == end1)
			i1 = NULL;
		if (cmp >= 0 && ++i2 == end2)
			i2 = NULL;
	}
	return total;
}
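/*
 * The loop above is a classic sorted-merge walk: cmp <= 0 advances the
 * target-specific iterator, cmp >= 0 advances the generic one, and an
 * equal match advances both while only emitting the target-specific
 * entry, which is how overrides shadow generic descriptors.
 */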
unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
{
	return ARRAY_SIZE(invariant_sys_regs)
		+ num_demux_regs()
		+ walk_sys_regs(vcpu, (u64 __user *)NULL);
}
int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	unsigned int i;
	int err;

	/* Then give them all the invariant registers' indices. */
	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) {
		if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices))
			return -EFAULT;
		uindices++;
	}

	err = walk_sys_regs(vcpu, uindices);
	if (err < 0)
		return err;
	uindices += err;

	return write_demux_regids(uindices);
}
static int check_sysreg_table(const struct sys_reg_desc *table, unsigned int n)
{
	unsigned int i;

	for (i = 1; i < n; i++) {
		if (cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
			kvm_err("sys_reg table %p out of order (%d)\n", table, i - 1);
			return 1;
		}
	}

	return 0;
}
void kvm_sys_reg_table_init(void)
{
	unsigned int i;
	struct sys_reg_desc clidr;

	/* Make sure tables are unique and in order. */
	BUG_ON(check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs)));
	BUG_ON(check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs)));
	BUG_ON(check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs)));
	BUG_ON(check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs)));
	BUG_ON(check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs)));
	BUG_ON(check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs)));

	/* We abuse the reset function to overwrite the table itself. */
	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
		invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]);

	/*
	 * CLIDR format is awkward, so clean it up.  See ARM B4.1.20:
	 *
	 *   If software reads the Cache Type fields from Ctype1
	 *   upwards, once it has seen a value of 0b000, no caches
	 *   exist at further-out levels of the hierarchy. So, for
	 *   example, if Ctype3 is the first Cache Type field with a
	 *   value of 0b000, the values of Ctype4 to Ctype7 must be
	 *   ignored.
	 */
	get_clidr_el1(NULL, &clidr); /* Ugly... */
	cache_levels = clidr.val;
	for (i = 0; i < 7; i++)
		if (((cache_levels >> (i*3)) & 7) == 0)
			break;
	/* Clear all higher bits. */
	cache_levels &= (1 << (i*3))-1;
}
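/*
 * Example of the CLIDR cleanup above: if the host reports Ctype1 = 3
 * (separate I+D), Ctype2 = 4 (unified) and Ctype3 = 0, the loop stops
 * at i = 2 and the mask keeps only bits [5:0], discarding whatever
 * sits in the supposedly-unused higher Ctype fields.
 */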
/**
 * kvm_reset_sys_regs - sets system registers to reset value
 * @vcpu: The VCPU pointer
 *
 * This function finds the right table above and sets the registers on the
 * virtual CPU struct to their architecturally defined reset values.
 */
void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
{
	size_t num;
	const struct sys_reg_desc *table;

	/* Catch someone adding a register without putting in reset entry. */
	memset(&vcpu->arch.ctxt.sys_regs, 0x42, sizeof(vcpu->arch.ctxt.sys_regs));

	/* Generic chip reset first (so target could override). */
	reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	table = get_target_table(vcpu->arch.target, true, &num);
	reset_sys_reg_descs(vcpu, table, num);

	for (num = 1; num < NR_SYS_REGS; num++)
		if (vcpu_sys_reg(vcpu, num) == 0x4242424242424242)
			panic("Didn't reset vcpu_sys_reg(%zi)", num);
}