/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/coproc.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Authors: Rusty Russell <rusty@rustcorp.com.au>
 *          Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/bsearch.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/debug-monitors.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_host.h>
#include <asm/kvm_mmu.h>
#include <asm/perf_event.h>
#include <asm/sysreg.h>

#include <trace/events/kvm.h>

#include "sys_regs.h"

#include "trace.h"
/*
 * All of this file is extremely similar to the ARM coproc.c, but the
 * types are different. My gut feeling is that it should be pretty
 * easy to merge, but that would be an ABI breakage -- again. VFP
 * would also need to be abstracted.
 *
 * For AArch32, we only take care of what is being trapped. Anything
 * that has to do with init and userspace access has to go via the
 * 64bit interface.
 */
static bool read_from_write_only(struct kvm_vcpu *vcpu,
                                 const struct sys_reg_params *params)
{
        WARN_ONCE(1, "Unexpected sys_reg read to write-only register\n");
        print_sys_reg_instr(params);
        kvm_inject_undefined(vcpu);
        return false;
}

/* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
static u32 cache_levels;
/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
#define CSSELR_MAX 12

/* Which cache CCSIDR represents depends on CSSELR value. */
static u32 get_ccsidr(u32 csselr)
{
        u32 ccsidr;

        /* Make sure no one else changes CSSELR during this! */
        local_irq_disable();
        write_sysreg(csselr, csselr_el1);
        isb();
        ccsidr = read_sysreg(ccsidr_el1);
        local_irq_enable();

        return ccsidr;
}
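
/*
 * Worked example (informative, added here for clarity): CSSELR is laid
 * out as { Level[3:1], InD[0] }, with Level encoded as (level - 1). A
 * caller asking for the level-2 data cache therefore passes
 * csselr = (1 << 1) | 0 == 2, and the CCSIDR read back describes that
 * cache.
 */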
/*
 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
 */
static bool access_dcsw(struct kvm_vcpu *vcpu,
                        struct sys_reg_params *p,
                        const struct sys_reg_desc *r)
{
        if (!p->is_write)
                return read_from_write_only(vcpu, p);

        kvm_set_way_flush(vcpu);
        return true;
}

/*
 * Generic accessor for VM registers. Only called as long as HCR_TVM
 * is set. If the guest enables the MMU, we stop trapping the VM
 * sys_regs and leave it in complete control of the caches.
 */
static bool access_vm_reg(struct kvm_vcpu *vcpu,
                          struct sys_reg_params *p,
                          const struct sys_reg_desc *r)
{
        bool was_enabled = vcpu_has_cache_enabled(vcpu);

        BUG_ON(!p->is_write);

        if (!p->is_aarch32) {
                vcpu_sys_reg(vcpu, r->reg) = p->regval;
        } else {
                if (!p->is_32bit)
                        vcpu_cp15_64_high(vcpu, r->reg) = upper_32_bits(p->regval);
                vcpu_cp15_64_low(vcpu, r->reg) = lower_32_bits(p->regval);
        }

        kvm_toggle_cache(vcpu, was_enabled);
        return true;
}

/*
 * Trap handler for the GICv3 SGI generation system register.
 * Forward the request to the VGIC emulation.
 * The cp15_64 code makes sure this automatically works
 * for both AArch64 and AArch32 accesses.
 */
static bool access_gic_sgi(struct kvm_vcpu *vcpu,
                           struct sys_reg_params *p,
                           const struct sys_reg_desc *r)
{
        if (!p->is_write)
                return read_from_write_only(vcpu, p);

        vgic_v3_dispatch_sgi(vcpu, p->regval);

        return true;
}

static bool access_gic_sre(struct kvm_vcpu *vcpu,
                           struct sys_reg_params *p,
                           const struct sys_reg_desc *r)
{
        if (p->is_write)
                return ignore_write(vcpu, p);

        p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
        return true;
}

static bool trap_raz_wi(struct kvm_vcpu *vcpu,
                        struct sys_reg_params *p,
                        const struct sys_reg_desc *r)
{
        if (p->is_write)
                return ignore_write(vcpu, p);
        else
                return read_zero(vcpu, p);
}

static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
                           struct sys_reg_params *p,
                           const struct sys_reg_desc *r)
{
        if (p->is_write) {
                return ignore_write(vcpu, p);
        } else {
                p->regval = (1 << 3);
                return true;
        }
}

static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
                                   struct sys_reg_params *p,
                                   const struct sys_reg_desc *r)
{
        if (p->is_write) {
                return ignore_write(vcpu, p);
        } else {
                p->regval = read_sysreg(dbgauthstatus_el1);
                return true;
        }
}
/*
 * We want to avoid world-switching all the DBG registers all the
 * time:
 *
 * - If we've touched any debug register, it is likely that we're
 *   going to touch more of them. It then makes sense to disable the
 *   traps and start doing the save/restore dance
 * - If debug is active (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), it is
 *   then mandatory to save/restore the registers, as the guest
 *   depends on them.
 *
 * For this, we use a DIRTY bit, indicating the guest has modified the
 * debug registers, used as follows:
 *
 * On guest entry:
 * - If the dirty bit is set (because we're coming back from trapping),
 *   disable the traps, save host registers, restore guest registers.
 * - If debug is actively in use (DBG_MDSCR_KDE or DBG_MDSCR_MDE set),
 *   set the dirty bit, disable the traps, save host registers,
 *   restore guest registers.
 * - Otherwise, enable the traps
 *
 * On guest exit:
 * - If the dirty bit is set, save guest registers, restore host
 *   registers and clear the dirty bit. This ensures that the host can
 *   now use the debug registers.
 */
static bool trap_debug_regs(struct kvm_vcpu *vcpu,
                            struct sys_reg_params *p,
                            const struct sys_reg_desc *r)
{
        if (p->is_write) {
                vcpu_sys_reg(vcpu, r->reg) = p->regval;
                vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
        } else {
                p->regval = vcpu_sys_reg(vcpu, r->reg);
        }

        trace_trap_reg(__func__, r->reg, p->is_write, p->regval);

        return true;
}
/*
 * reg_to_dbg/dbg_to_reg
 *
 * A 32 bit write to a debug register leaves the top bits alone
 * A 32 bit read from a debug register only returns the bottom bits
 *
 * All writes will set the KVM_ARM64_DEBUG_DIRTY flag to ensure the
 * hyp.S code switches between host and guest values in future.
 */
static void reg_to_dbg(struct kvm_vcpu *vcpu,
                       struct sys_reg_params *p,
                       u64 *dbg_reg)
{
        u64 val = p->regval;

        if (p->is_32bit) {
                val &= 0xffffffffUL;
                val |= ((*dbg_reg >> 32) << 32);
        }

        *dbg_reg = val;
        vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
}

static void dbg_to_reg(struct kvm_vcpu *vcpu,
                       struct sys_reg_params *p,
                       u64 *dbg_reg)
{
        p->regval = *dbg_reg;
        if (p->is_32bit)
                p->regval &= 0xffffffffUL;
}
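
/*
 * Worked example (informative): with *dbg_reg == 0xdeadbeef00c0ffee, a
 * 32 bit write of 0x12345678 through reg_to_dbg() leaves
 * 0xdeadbeef12345678 (top bits preserved), while a 32 bit read through
 * dbg_to_reg() returns only 0x12345678.
 */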
static bool trap_bvr(struct kvm_vcpu *vcpu,
                     struct sys_reg_params *p,
                     const struct sys_reg_desc *rd)
{
        u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];

        if (p->is_write)
                reg_to_dbg(vcpu, p, dbg_reg);
        else
                dbg_to_reg(vcpu, p, dbg_reg);

        trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);

        return true;
}

static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
                   const struct kvm_one_reg *reg, void __user *uaddr)
{
        __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];

        if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
                return -EFAULT;
        return 0;
}

static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
                   const struct kvm_one_reg *reg, void __user *uaddr)
{
        __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];

        if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
                return -EFAULT;
        return 0;
}

static void reset_bvr(struct kvm_vcpu *vcpu,
                      const struct sys_reg_desc *rd)
{
        vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg] = rd->val;
}

static bool trap_bcr(struct kvm_vcpu *vcpu,
                     struct sys_reg_params *p,
                     const struct sys_reg_desc *rd)
{
        u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];

        if (p->is_write)
                reg_to_dbg(vcpu, p, dbg_reg);
        else
                dbg_to_reg(vcpu, p, dbg_reg);

        trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);

        return true;
}

static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
                   const struct kvm_one_reg *reg, void __user *uaddr)
{
        __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];

        if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
                return -EFAULT;
        return 0;
}

static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
                   const struct kvm_one_reg *reg, void __user *uaddr)
{
        __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];

        if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
                return -EFAULT;
        return 0;
}

static void reset_bcr(struct kvm_vcpu *vcpu,
                      const struct sys_reg_desc *rd)
{
        vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg] = rd->val;
}

static bool trap_wvr(struct kvm_vcpu *vcpu,
                     struct sys_reg_params *p,
                     const struct sys_reg_desc *rd)
{
        u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];

        if (p->is_write)
                reg_to_dbg(vcpu, p, dbg_reg);
        else
                dbg_to_reg(vcpu, p, dbg_reg);

        trace_trap_reg(__func__, rd->reg, p->is_write,
                       vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]);

        return true;
}

static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
                   const struct kvm_one_reg *reg, void __user *uaddr)
{
        __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];

        if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
                return -EFAULT;
        return 0;
}

static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
                   const struct kvm_one_reg *reg, void __user *uaddr)
{
        __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];

        if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
                return -EFAULT;
        return 0;
}

static void reset_wvr(struct kvm_vcpu *vcpu,
                      const struct sys_reg_desc *rd)
{
        vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg] = rd->val;
}

static bool trap_wcr(struct kvm_vcpu *vcpu,
                     struct sys_reg_params *p,
                     const struct sys_reg_desc *rd)
{
        u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];

        if (p->is_write)
                reg_to_dbg(vcpu, p, dbg_reg);
        else
                dbg_to_reg(vcpu, p, dbg_reg);

        trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);

        return true;
}

static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
                   const struct kvm_one_reg *reg, void __user *uaddr)
{
        __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];

        if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
                return -EFAULT;
        return 0;
}

static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
                   const struct kvm_one_reg *reg, void __user *uaddr)
{
        __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];

        if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
                return -EFAULT;
        return 0;
}

static void reset_wcr(struct kvm_vcpu *vcpu,
                      const struct sys_reg_desc *rd)
{
        vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg] = rd->val;
}
static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
        vcpu_sys_reg(vcpu, AMAIR_EL1) = read_sysreg(amair_el1);
}

static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
        u64 mpidr;

        /*
         * Map the vcpu_id into the first three affinity level fields of
         * the MPIDR. We limit the number of VCPUs in level 0 due to a
         * limitation to 16 CPUs in that level in the ICC_SGIxR registers
         * of the GICv3 to be able to address each CPU directly when
         * sending IPIs.
         */
        mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0);
        mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1);
        mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2);
        vcpu_sys_reg(vcpu, MPIDR_EL1) = (1ULL << 31) | mpidr;
}
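
/*
 * Worked example (informative): vcpu_id 17 (0x11) yields Aff0 = 1 and
 * Aff1 = 1, so the guest reads MPIDR_EL1 as
 * (1ULL << 31) | (1 << 8) | 1 == 0x80000101.
 */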
static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
        u64 pmcr, val;

        pmcr = read_sysreg(pmcr_el0);
        /*
         * Writable bits of PMCR_EL0 (ARMV8_PMU_PMCR_MASK) are reset to UNKNOWN
         * except PMCR.E resetting to zero.
         */
        val = ((pmcr & ~ARMV8_PMU_PMCR_MASK)
               | (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E);
        vcpu_sys_reg(vcpu, PMCR_EL0) = val;
}
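
/*
 * Note (informative): 0xdecafbad is just an arbitrary junk pattern; any
 * value would do, since the writable PMCR bits architecturally reset to
 * an UNKNOWN value. Only the explicit clearing of PMCR.E matters.
 */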
static bool pmu_access_el0_disabled(struct kvm_vcpu *vcpu)
{
        u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0);

        return !((reg & ARMV8_PMU_USERENR_EN) || vcpu_mode_priv(vcpu));
}

static bool pmu_write_swinc_el0_disabled(struct kvm_vcpu *vcpu)
{
        u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0);

        return !((reg & (ARMV8_PMU_USERENR_SW | ARMV8_PMU_USERENR_EN))
                 || vcpu_mode_priv(vcpu));
}

static bool pmu_access_cycle_counter_el0_disabled(struct kvm_vcpu *vcpu)
{
        u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0);

        return !((reg & (ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_EN))
                 || vcpu_mode_priv(vcpu));
}

static bool pmu_access_event_counter_el0_disabled(struct kvm_vcpu *vcpu)
{
        u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0);

        return !((reg & (ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_EN))
                 || vcpu_mode_priv(vcpu));
}
static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
                        const struct sys_reg_desc *r)
{
        u64 val;

        if (!kvm_arm_pmu_v3_ready(vcpu))
                return trap_raz_wi(vcpu, p, r);

        if (pmu_access_el0_disabled(vcpu))
                return false;

        if (p->is_write) {
                /* Only update writeable bits of PMCR */
                val = vcpu_sys_reg(vcpu, PMCR_EL0);
                val &= ~ARMV8_PMU_PMCR_MASK;
                val |= p->regval & ARMV8_PMU_PMCR_MASK;
                vcpu_sys_reg(vcpu, PMCR_EL0) = val;
                kvm_pmu_handle_pmcr(vcpu, val);
        } else {
                /* PMCR.P & PMCR.C are RAZ */
                val = vcpu_sys_reg(vcpu, PMCR_EL0)
                      & ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C);
                p->regval = val;
        }

        return true;
}

static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
                          const struct sys_reg_desc *r)
{
        if (!kvm_arm_pmu_v3_ready(vcpu))
                return trap_raz_wi(vcpu, p, r);

        if (pmu_access_event_counter_el0_disabled(vcpu))
                return false;

        if (p->is_write)
                vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval;
        else
                /* return PMSELR.SEL field */
                p->regval = vcpu_sys_reg(vcpu, PMSELR_EL0)
                            & ARMV8_PMU_COUNTER_MASK;

        return true;
}

static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
                          const struct sys_reg_desc *r)
{
        u64 pmceid;

        if (!kvm_arm_pmu_v3_ready(vcpu))
                return trap_raz_wi(vcpu, p, r);

        BUG_ON(p->is_write);

        if (pmu_access_el0_disabled(vcpu))
                return false;

        if (!(p->Op2 & 1))
                pmceid = read_sysreg(pmceid0_el0);
        else
                pmceid = read_sysreg(pmceid1_el0);

        p->regval = pmceid;

        return true;
}

static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
{
        u64 pmcr, val;

        pmcr = vcpu_sys_reg(vcpu, PMCR_EL0);
        val = (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
        if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX)
                return false;

        return true;
}
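
/*
 * Worked example (informative): with PMCR_EL0.N == 6, event counter
 * indices 0..5 are valid, 6..30 are rejected, and ARMV8_PMU_CYCLE_IDX
 * (the cycle counter) is always accepted.
 */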
static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
                              struct sys_reg_params *p,
                              const struct sys_reg_desc *r)
{
        u64 idx;

        if (!kvm_arm_pmu_v3_ready(vcpu))
                return trap_raz_wi(vcpu, p, r);

        if (r->CRn == 9 && r->CRm == 13) {
                if (r->Op2 == 2) {
                        /* PMXEVCNTR_EL0 */
                        if (pmu_access_event_counter_el0_disabled(vcpu))
                                return false;

                        idx = vcpu_sys_reg(vcpu, PMSELR_EL0)
                              & ARMV8_PMU_COUNTER_MASK;
                } else if (r->Op2 == 0) {
                        /* PMCCNTR_EL0 */
                        if (pmu_access_cycle_counter_el0_disabled(vcpu))
                                return false;

                        idx = ARMV8_PMU_CYCLE_IDX;
                } else {
                        return false;
                }
        } else if (r->CRn == 0 && r->CRm == 9) {
                /* PMCCNTR */
                if (pmu_access_event_counter_el0_disabled(vcpu))
                        return false;

                idx = ARMV8_PMU_CYCLE_IDX;
        } else if (r->CRn == 14 && (r->CRm & 12) == 8) {
                /* PMEVCNTRn_EL0 */
                if (pmu_access_event_counter_el0_disabled(vcpu))
                        return false;

                idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
        } else {
                return false;
        }

        if (!pmu_counter_idx_valid(vcpu, idx))
                return false;

        if (p->is_write) {
                if (pmu_access_el0_disabled(vcpu))
                        return false;

                kvm_pmu_set_counter_value(vcpu, idx, p->regval);
        } else {
                p->regval = kvm_pmu_get_counter_value(vcpu, idx);
        }

        return true;
}

static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
                               const struct sys_reg_desc *r)
{
        u64 idx, reg;

        if (!kvm_arm_pmu_v3_ready(vcpu))
                return trap_raz_wi(vcpu, p, r);

        if (pmu_access_el0_disabled(vcpu))
                return false;

        if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {
                /* PMXEVTYPER_EL0 */
                idx = vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK;
                reg = PMEVTYPER0_EL0 + idx;
        } else if (r->CRn == 14 && (r->CRm & 12) == 12) {
                idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
                if (idx == ARMV8_PMU_CYCLE_IDX)
                        reg = PMCCFILTR_EL0;
                else
                        /* PMEVTYPERn_EL0 */
                        reg = PMEVTYPER0_EL0 + idx;
        } else {
                BUG();
        }

        if (!pmu_counter_idx_valid(vcpu, idx))
                return false;

        if (p->is_write) {
                kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
                vcpu_sys_reg(vcpu, reg) = p->regval & ARMV8_PMU_EVTYPE_MASK;
        } else {
                p->regval = vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK;
        }

        return true;
}

static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
                           const struct sys_reg_desc *r)
{
        u64 val, mask;

        if (!kvm_arm_pmu_v3_ready(vcpu))
                return trap_raz_wi(vcpu, p, r);

        if (pmu_access_el0_disabled(vcpu))
                return false;

        mask = kvm_pmu_valid_counter_mask(vcpu);
        if (p->is_write) {
                val = p->regval & mask;
                if (r->Op2 & 0x1) {
                        /* accessing PMCNTENSET_EL0 */
                        vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
                        kvm_pmu_enable_counter(vcpu, val);
                } else {
                        /* accessing PMCNTENCLR_EL0 */
                        vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
                        kvm_pmu_disable_counter(vcpu, val);
                }
        } else {
                p->regval = vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask;
        }

        return true;
}

static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
                           const struct sys_reg_desc *r)
{
        u64 mask = kvm_pmu_valid_counter_mask(vcpu);

        if (!kvm_arm_pmu_v3_ready(vcpu))
                return trap_raz_wi(vcpu, p, r);

        if (!vcpu_mode_priv(vcpu))
                return false;

        if (p->is_write) {
                u64 val = p->regval & mask;

                if (r->Op2 & 0x1)
                        /* accessing PMINTENSET_EL1 */
                        vcpu_sys_reg(vcpu, PMINTENSET_EL1) |= val;
                else
                        /* accessing PMINTENCLR_EL1 */
                        vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val;
        } else {
                p->regval = vcpu_sys_reg(vcpu, PMINTENSET_EL1) & mask;
        }

        return true;
}

static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
                         const struct sys_reg_desc *r)
{
        u64 mask = kvm_pmu_valid_counter_mask(vcpu);

        if (!kvm_arm_pmu_v3_ready(vcpu))
                return trap_raz_wi(vcpu, p, r);

        if (pmu_access_el0_disabled(vcpu))
                return false;

        if (p->is_write) {
                if (r->CRm & 0x2)
                        /* accessing PMOVSSET_EL0 */
                        kvm_pmu_overflow_set(vcpu, p->regval & mask);
                else
                        /* accessing PMOVSCLR_EL0 */
                        vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask);
        } else {
                p->regval = vcpu_sys_reg(vcpu, PMOVSSET_EL0) & mask;
        }

        return true;
}

static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
                           const struct sys_reg_desc *r)
{
        u64 mask;

        if (!kvm_arm_pmu_v3_ready(vcpu))
                return trap_raz_wi(vcpu, p, r);

        if (pmu_write_swinc_el0_disabled(vcpu))
                return false;

        if (p->is_write) {
                mask = kvm_pmu_valid_counter_mask(vcpu);
                kvm_pmu_software_increment(vcpu, p->regval & mask);
                return true;
        }

        return false;
}

static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
                             const struct sys_reg_desc *r)
{
        if (!kvm_arm_pmu_v3_ready(vcpu))
                return trap_raz_wi(vcpu, p, r);

        if (p->is_write) {
                if (!vcpu_mode_priv(vcpu))
                        return false;

                vcpu_sys_reg(vcpu, PMUSERENR_EL0) = p->regval
                                                    & ARMV8_PMU_USERENR_MASK;
        } else {
                p->regval = vcpu_sys_reg(vcpu, PMUSERENR_EL0)
                            & ARMV8_PMU_USERENR_MASK;
        }

        return true;
}
/* Silly macro to expand the DBG{BCR,BVR,WCR,WVR}n_EL1 registers in one go */
#define DBG_BCR_BVR_WCR_WVR_EL1(n)                                      \
        /* DBGBVRn_EL1 */                                               \
        { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b100),     \
          trap_bvr, reset_bvr, n, 0, get_bvr, set_bvr },                \
        /* DBGBCRn_EL1 */                                               \
        { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b101),     \
          trap_bcr, reset_bcr, n, 0, get_bcr, set_bcr },                \
        /* DBGWVRn_EL1 */                                               \
        { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b110),     \
          trap_wvr, reset_wvr, n, 0, get_wvr, set_wvr },                \
        /* DBGWCRn_EL1 */                                               \
        { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b111),     \
          trap_wcr, reset_wcr, n, 0, get_wcr, set_wcr }

/* Macro to expand the PMEVCNTRn_EL0 register */
#define PMU_PMEVCNTR_EL0(n)                                             \
        /* PMEVCNTRn_EL0 */                                             \
        { Op0(0b11), Op1(0b011), CRn(0b1110),                           \
          CRm((0b1000 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),         \
          access_pmu_evcntr, reset_unknown, (PMEVCNTR0_EL0 + n), }

/* Macro to expand the PMEVTYPERn_EL0 register */
#define PMU_PMEVTYPER_EL0(n)                                            \
        /* PMEVTYPERn_EL0 */                                            \
        { Op0(0b11), Op1(0b011), CRn(0b1110),                           \
          CRm((0b1100 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),         \
          access_pmu_evtyper, reset_unknown, (PMEVTYPER0_EL0 + n), }
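
/*
 * Worked example (informative): PMU_PMEVCNTR_EL0(18) expands to
 * CRm(0b1000 | 2) == CRm(0b1010) and Op2(18 & 0x7) == Op2(0b010),
 * which matches the architectural encoding of PMEVCNTR18_EL0.
 */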
/*
 * Architected system registers.
 * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
 *
 * Debug handling: We do trap most, if not all debug related system
 * registers. The implementation is good enough to ensure that a guest
 * can use these with minimal performance degradation. The drawback is
 * that we don't implement any of the external debug or the OSlock
 * protocol. This should be revisited if we ever encounter a more
 * demanding guest...
 */
static const struct sys_reg_desc sys_reg_descs[] = {
        /* DC ISW */
        { Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b0110), Op2(0b010),
          access_dcsw },
        /* DC CSW */
        { Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1010), Op2(0b010),
          access_dcsw },
        /* DC CISW */
        { Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b010),
          access_dcsw },

        DBG_BCR_BVR_WCR_WVR_EL1(0),
        DBG_BCR_BVR_WCR_WVR_EL1(1),
        /* MDCCINT_EL1 */
        { Op0(0b10), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b000),
          trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
        /* MDSCR_EL1 */
        { Op0(0b10), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b010),
          trap_debug_regs, reset_val, MDSCR_EL1, 0 },
        DBG_BCR_BVR_WCR_WVR_EL1(2),
        DBG_BCR_BVR_WCR_WVR_EL1(3),
        DBG_BCR_BVR_WCR_WVR_EL1(4),
        DBG_BCR_BVR_WCR_WVR_EL1(5),
        DBG_BCR_BVR_WCR_WVR_EL1(6),
        DBG_BCR_BVR_WCR_WVR_EL1(7),
        DBG_BCR_BVR_WCR_WVR_EL1(8),
        DBG_BCR_BVR_WCR_WVR_EL1(9),
        DBG_BCR_BVR_WCR_WVR_EL1(10),
        DBG_BCR_BVR_WCR_WVR_EL1(11),
        DBG_BCR_BVR_WCR_WVR_EL1(12),
        DBG_BCR_BVR_WCR_WVR_EL1(13),
        DBG_BCR_BVR_WCR_WVR_EL1(14),
        DBG_BCR_BVR_WCR_WVR_EL1(15),

        /* MDRAR_EL1 */
        { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000),
          trap_raz_wi },
        /* OSLAR_EL1 */
        { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b100),
          trap_raz_wi },
        /* OSLSR_EL1 */
        { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0001), Op2(0b100),
          trap_oslsr_el1 },
        /* OSDLR_EL1 */
        { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0011), Op2(0b100),
          trap_raz_wi },
        /* DBGPRCR_EL1 */
        { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0100), Op2(0b100),
          trap_raz_wi },
        /* DBGCLAIMSET_EL1 */
        { Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1000), Op2(0b110),
          trap_raz_wi },
        /* DBGCLAIMCLR_EL1 */
        { Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1001), Op2(0b110),
          trap_raz_wi },
        /* DBGAUTHSTATUS_EL1 */
        { Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b110),
          trap_dbgauthstatus_el1 },

        /* MDCCSR_EL0 */
        { Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0001), Op2(0b000),
          trap_raz_wi },
        /* DBGDTR_EL0 */
        { Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0100), Op2(0b000),
          trap_raz_wi },
        /* DBGDTR[TR]X_EL0 */
        { Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0101), Op2(0b000),
          trap_raz_wi },

        /* DBGVCR32_EL2 */
        { Op0(0b10), Op1(0b100), CRn(0b0000), CRm(0b0111), Op2(0b000),
          NULL, reset_val, DBGVCR32_EL2, 0 },

        /* MPIDR_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b101),
          NULL, reset_mpidr, MPIDR_EL1 },
        /* SCTLR_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000),
          access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
        /* CPACR_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b010),
          NULL, reset_val, CPACR_EL1, 0 },
        /* TTBR0_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b000),
          access_vm_reg, reset_unknown, TTBR0_EL1 },
        /* TTBR1_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b001),
          access_vm_reg, reset_unknown, TTBR1_EL1 },
        /* TCR_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b010),
          access_vm_reg, reset_val, TCR_EL1, 0 },

        /* AFSR0_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b000),
          access_vm_reg, reset_unknown, AFSR0_EL1 },
        /* AFSR1_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b001),
          access_vm_reg, reset_unknown, AFSR1_EL1 },
        /* ESR_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0010), Op2(0b000),
          access_vm_reg, reset_unknown, ESR_EL1 },
        /* FAR_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b0110), CRm(0b0000), Op2(0b000),
          access_vm_reg, reset_unknown, FAR_EL1 },
        /* PAR_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b0111), CRm(0b0100), Op2(0b000),
          NULL, reset_unknown, PAR_EL1 },

        /* PMINTENSET_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b001),
          access_pminten, reset_unknown, PMINTENSET_EL1 },
        /* PMINTENCLR_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b010),
          access_pminten, NULL, PMINTENSET_EL1 },

        /* MAIR_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0010), Op2(0b000),
          access_vm_reg, reset_unknown, MAIR_EL1 },
        /* AMAIR_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0011), Op2(0b000),
          access_vm_reg, reset_amair_el1, AMAIR_EL1 },

        /* VBAR_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b0000), Op2(0b000),
          NULL, reset_val, VBAR_EL1, 0 },

        /* ICC_SGI1R_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1011), Op2(0b101),
          access_gic_sgi },
        /* ICC_SRE_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1100), Op2(0b101),
          access_gic_sre },

        /* CONTEXTIDR_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b001),
          access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
        /* TPIDR_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b100),
          NULL, reset_unknown, TPIDR_EL1 },

        /* CNTKCTL_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b1110), CRm(0b0001), Op2(0b000),
          NULL, reset_val, CNTKCTL_EL1, 0},

        /* CSSELR_EL1 */
        { Op0(0b11), Op1(0b010), CRn(0b0000), CRm(0b0000), Op2(0b000),
          NULL, reset_unknown, CSSELR_EL1 },

        /* PMCR_EL0 */
        { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b000),
          access_pmcr, reset_pmcr, },
        /* PMCNTENSET_EL0 */
        { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b001),
          access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
        /* PMCNTENCLR_EL0 */
        { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b010),
          access_pmcnten, NULL, PMCNTENSET_EL0 },
        /* PMOVSCLR_EL0 */
        { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b011),
          access_pmovs, NULL, PMOVSSET_EL0 },
        /* PMSWINC_EL0 */
        { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b100),
          access_pmswinc, reset_unknown, PMSWINC_EL0 },
        /* PMSELR_EL0 */
        { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b101),
          access_pmselr, reset_unknown, PMSELR_EL0 },
        /* PMCEID0_EL0 */
        { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b110),
          access_pmceid },
        /* PMCEID1_EL0 */
        { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b111),
          access_pmceid },
        /* PMCCNTR_EL0 */
        { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b000),
          access_pmu_evcntr, reset_unknown, PMCCNTR_EL0 },
        /* PMXEVTYPER_EL0 */
        { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b001),
          access_pmu_evtyper },
        /* PMXEVCNTR_EL0 */
        { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b010),
          access_pmu_evcntr },
        /*
         * PMUSERENR_EL0
         * This register resets as unknown in 64bit mode while it resets as zero
         * in 32bit mode. Here we choose to reset it as zero for consistency.
         */
        { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b000),
          access_pmuserenr, reset_val, PMUSERENR_EL0, 0 },
        /* PMOVSSET_EL0 */
        { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b011),
          access_pmovs, reset_unknown, PMOVSSET_EL0 },

        /* TPIDR_EL0 */
        { Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b010),
          NULL, reset_unknown, TPIDR_EL0 },
        /* TPIDRRO_EL0 */
        { Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b011),
          NULL, reset_unknown, TPIDRRO_EL0 },

        /* PMEVCNTRn_EL0 */
        PMU_PMEVCNTR_EL0(0),
        PMU_PMEVCNTR_EL0(1),
        PMU_PMEVCNTR_EL0(2),
        PMU_PMEVCNTR_EL0(3),
        PMU_PMEVCNTR_EL0(4),
        PMU_PMEVCNTR_EL0(5),
        PMU_PMEVCNTR_EL0(6),
        PMU_PMEVCNTR_EL0(7),
        PMU_PMEVCNTR_EL0(8),
        PMU_PMEVCNTR_EL0(9),
        PMU_PMEVCNTR_EL0(10),
        PMU_PMEVCNTR_EL0(11),
        PMU_PMEVCNTR_EL0(12),
        PMU_PMEVCNTR_EL0(13),
        PMU_PMEVCNTR_EL0(14),
        PMU_PMEVCNTR_EL0(15),
        PMU_PMEVCNTR_EL0(16),
        PMU_PMEVCNTR_EL0(17),
        PMU_PMEVCNTR_EL0(18),
        PMU_PMEVCNTR_EL0(19),
        PMU_PMEVCNTR_EL0(20),
        PMU_PMEVCNTR_EL0(21),
        PMU_PMEVCNTR_EL0(22),
        PMU_PMEVCNTR_EL0(23),
        PMU_PMEVCNTR_EL0(24),
        PMU_PMEVCNTR_EL0(25),
        PMU_PMEVCNTR_EL0(26),
        PMU_PMEVCNTR_EL0(27),
        PMU_PMEVCNTR_EL0(28),
        PMU_PMEVCNTR_EL0(29),
        PMU_PMEVCNTR_EL0(30),
        /* PMEVTYPERn_EL0 */
        PMU_PMEVTYPER_EL0(0),
        PMU_PMEVTYPER_EL0(1),
        PMU_PMEVTYPER_EL0(2),
        PMU_PMEVTYPER_EL0(3),
        PMU_PMEVTYPER_EL0(4),
        PMU_PMEVTYPER_EL0(5),
        PMU_PMEVTYPER_EL0(6),
        PMU_PMEVTYPER_EL0(7),
        PMU_PMEVTYPER_EL0(8),
        PMU_PMEVTYPER_EL0(9),
        PMU_PMEVTYPER_EL0(10),
        PMU_PMEVTYPER_EL0(11),
        PMU_PMEVTYPER_EL0(12),
        PMU_PMEVTYPER_EL0(13),
        PMU_PMEVTYPER_EL0(14),
        PMU_PMEVTYPER_EL0(15),
        PMU_PMEVTYPER_EL0(16),
        PMU_PMEVTYPER_EL0(17),
        PMU_PMEVTYPER_EL0(18),
        PMU_PMEVTYPER_EL0(19),
        PMU_PMEVTYPER_EL0(20),
        PMU_PMEVTYPER_EL0(21),
        PMU_PMEVTYPER_EL0(22),
        PMU_PMEVTYPER_EL0(23),
        PMU_PMEVTYPER_EL0(24),
        PMU_PMEVTYPER_EL0(25),
        PMU_PMEVTYPER_EL0(26),
        PMU_PMEVTYPER_EL0(27),
        PMU_PMEVTYPER_EL0(28),
        PMU_PMEVTYPER_EL0(29),
        PMU_PMEVTYPER_EL0(30),
        /*
         * PMCCFILTR_EL0
         * This register resets as unknown in 64bit mode while it resets as zero
         * in 32bit mode. Here we choose to reset it as zero for consistency.
         */
        { Op0(0b11), Op1(0b011), CRn(0b1110), CRm(0b1111), Op2(0b111),
          access_pmu_evtyper, reset_val, PMCCFILTR_EL0, 0 },

        /* DACR32_EL2 */
        { Op0(0b11), Op1(0b100), CRn(0b0011), CRm(0b0000), Op2(0b000),
          NULL, reset_unknown, DACR32_EL2 },
        /* IFSR32_EL2 */
        { Op0(0b11), Op1(0b100), CRn(0b0101), CRm(0b0000), Op2(0b001),
          NULL, reset_unknown, IFSR32_EL2 },
        /* FPEXC32_EL2 */
        { Op0(0b11), Op1(0b100), CRn(0b0101), CRm(0b0011), Op2(0b000),
          NULL, reset_val, FPEXC32_EL2, 0x70 },
};
static bool trap_dbgidr(struct kvm_vcpu *vcpu,
                        struct sys_reg_params *p,
                        const struct sys_reg_desc *r)
{
        if (p->is_write) {
                return ignore_write(vcpu, p);
        } else {
                u64 dfr = read_system_reg(SYS_ID_AA64DFR0_EL1);
                u64 pfr = read_system_reg(SYS_ID_AA64PFR0_EL1);
                u32 el3 = !!cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR0_EL3_SHIFT);

                p->regval = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) |
                             (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) |
                             (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20)
                             | (6 << 16) | (el3 << 14) | (el3 << 12));
                return true;
        }
}
static bool trap_debug32(struct kvm_vcpu *vcpu,
                         struct sys_reg_params *p,
                         const struct sys_reg_desc *r)
{
        if (p->is_write) {
                vcpu_cp14(vcpu, r->reg) = p->regval;
                vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
        } else {
                p->regval = vcpu_cp14(vcpu, r->reg);
        }

        return true;
}
/* AArch32 debug register mappings
 *
 * AArch32 DBGBVRn is mapped to DBGBVRn_EL1[31:0]
 * AArch32 DBGBXVRn is mapped to DBGBVRn_EL1[63:32]
 *
 * All control registers and watchpoint value registers are mapped to
 * the lower 32 bits of their AArch64 equivalents. We share the trap
 * handlers with the above AArch64 code which checks what mode the
 * system is in.
 */
static bool trap_xvr(struct kvm_vcpu *vcpu,
                     struct sys_reg_params *p,
                     const struct sys_reg_desc *rd)
{
        u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];

        if (p->is_write) {
                u64 val = *dbg_reg;

                val &= 0xffffffffUL;
                val |= p->regval << 32;
                *dbg_reg = val;

                vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
        } else {
                p->regval = *dbg_reg >> 32;
        }

        trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);

        return true;
}
#define DBG_BCR_BVR_WCR_WVR(n)                                          \
        /* DBGBVRn */                                                   \
        { Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_bvr, NULL, n },     \
        /* DBGBCRn */                                                   \
        { Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_bcr, NULL, n },     \
        /* DBGWVRn */                                                   \
        { Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_wvr, NULL, n },     \
        /* DBGWCRn */                                                   \
        { Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_wcr, NULL, n }

#define DBGBXVR(n)                                                      \
        { Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_xvr, NULL, n }
/*
 * Trapped cp14 registers. We generally ignore most of the external
 * debug, on the principle that they don't really make sense to a
 * guest. Revisit this one day, should this principle change.
 */
static const struct sys_reg_desc cp14_regs[] = {
        /* DBGIDR */
        { Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgidr },
        /* DBGDTRRXext */
        { Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi },

        DBG_BCR_BVR_WCR_WVR(0),
        /* DBGDSCRint */
        { Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi },
        DBG_BCR_BVR_WCR_WVR(1),
        /* DBGDCCINT */
        { Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug32 },
        /* DBGDSCRext */
        { Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug32 },
        DBG_BCR_BVR_WCR_WVR(2),
        /* DBGDTR[RT]Xint */
        { Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi },
        /* DBGDTR[RT]Xext */
        { Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi },
        DBG_BCR_BVR_WCR_WVR(3),
        DBG_BCR_BVR_WCR_WVR(4),
        DBG_BCR_BVR_WCR_WVR(5),
        /* DBGWFAR */
        { Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi },
        /* DBGOSECCR */
        { Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi },
        DBG_BCR_BVR_WCR_WVR(6),
        /* DBGVCR */
        { Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug32 },
        DBG_BCR_BVR_WCR_WVR(7),
        DBG_BCR_BVR_WCR_WVR(8),
        DBG_BCR_BVR_WCR_WVR(9),
        DBG_BCR_BVR_WCR_WVR(10),
        DBG_BCR_BVR_WCR_WVR(11),
        DBG_BCR_BVR_WCR_WVR(12),
        DBG_BCR_BVR_WCR_WVR(13),
        DBG_BCR_BVR_WCR_WVR(14),
        DBG_BCR_BVR_WCR_WVR(15),

        /* DBGDRAR (32bit) */
        { Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi },

        DBGBXVR(0),
        /* DBGOSLAR */
        { Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_raz_wi },
        DBGBXVR(1),
        /* DBGOSLSR */
        { Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1 },
        DBGBXVR(2),
        DBGBXVR(3),
        /* DBGOSDLR */
        { Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi },
        DBGBXVR(4),
        /* DBGPRCR */
        { Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi },
        DBGBXVR(5),
        DBGBXVR(6),
        DBGBXVR(7),
        DBGBXVR(8),
        DBGBXVR(9),
        DBGBXVR(10),
        DBGBXVR(11),
        DBGBXVR(12),
        DBGBXVR(13),
        DBGBXVR(14),
        DBGBXVR(15),

        /* DBGDSAR (32bit) */
        { Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi },

        /* DBGDEVID2 */
        { Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi },
        /* DBGDEVID1 */
        { Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi },
        /* DBGDEVID */
        { Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi },
        /* DBGCLAIMSET */
        { Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi },
        /* DBGCLAIMCLR */
        { Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi },
        /* DBGAUTHSTATUS */
        { Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 },
};
/* Trapped cp14 64bit registers */
static const struct sys_reg_desc cp14_64_regs[] = {
        /* DBGDRAR (64bit) */
        { Op1( 0), CRm( 1), .access = trap_raz_wi },

        /* DBGDSAR (64bit) */
        { Op1( 0), CRm( 2), .access = trap_raz_wi },
};
/* Macro to expand the PMEVCNTRn register */
#define PMU_PMEVCNTR(n)                                                 \
        /* PMEVCNTRn */                                                 \
        { Op1(0), CRn(0b1110),                                          \
          CRm((0b1000 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),         \
          access_pmu_evcntr }

/* Macro to expand the PMEVTYPERn register */
#define PMU_PMEVTYPER(n)                                                \
        /* PMEVTYPERn */                                                \
        { Op1(0), CRn(0b1110),                                          \
          CRm((0b1100 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),         \
          access_pmu_evtyper }
/*
 * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
 * depending on the way they are accessed (as a 32bit or a 64bit
 * register).
 */
static const struct sys_reg_desc cp15_regs[] = {
        { Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi },

        { Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, c1_SCTLR },
        { Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
        { Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 },
        { Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR },
        { Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, c3_DACR },
        { Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, c5_DFSR },
        { Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, c5_IFSR },
        { Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, c5_ADFSR },
        { Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, c5_AIFSR },
        { Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, c6_DFAR },
        { Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, c6_IFAR },

        /*
         * DC{C,I,CI}SW operations:
         */
        { Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
        { Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
        { Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },

        /* PMU */
        { Op1( 0), CRn( 9), CRm(12), Op2( 0), access_pmcr },
        { Op1( 0), CRn( 9), CRm(12), Op2( 1), access_pmcnten },
        { Op1( 0), CRn( 9), CRm(12), Op2( 2), access_pmcnten },
        { Op1( 0), CRn( 9), CRm(12), Op2( 3), access_pmovs },
        { Op1( 0), CRn( 9), CRm(12), Op2( 4), access_pmswinc },
        { Op1( 0), CRn( 9), CRm(12), Op2( 5), access_pmselr },
        { Op1( 0), CRn( 9), CRm(12), Op2( 6), access_pmceid },
        { Op1( 0), CRn( 9), CRm(12), Op2( 7), access_pmceid },
        { Op1( 0), CRn( 9), CRm(13), Op2( 0), access_pmu_evcntr },
        { Op1( 0), CRn( 9), CRm(13), Op2( 1), access_pmu_evtyper },
        { Op1( 0), CRn( 9), CRm(13), Op2( 2), access_pmu_evcntr },
        { Op1( 0), CRn( 9), CRm(14), Op2( 0), access_pmuserenr },
        { Op1( 0), CRn( 9), CRm(14), Op2( 1), access_pminten },
        { Op1( 0), CRn( 9), CRm(14), Op2( 2), access_pminten },
        { Op1( 0), CRn( 9), CRm(14), Op2( 3), access_pmovs },

        { Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, c10_PRRR },
        { Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR },
        { Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, c10_AMAIR0 },
        { Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 },

        /* ICC_SRE */
        { Op1( 0), CRn(12), CRm(12), Op2( 5), access_gic_sre },

        { Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },

        /* PMU */
        PMU_PMEVCNTR(0),
        PMU_PMEVCNTR(1),
        PMU_PMEVCNTR(2),
        PMU_PMEVCNTR(3),
        PMU_PMEVCNTR(4),
        PMU_PMEVCNTR(5),
        PMU_PMEVCNTR(6),
        PMU_PMEVCNTR(7),
        PMU_PMEVCNTR(8),
        PMU_PMEVCNTR(9),
        PMU_PMEVCNTR(10),
        PMU_PMEVCNTR(11),
        PMU_PMEVCNTR(12),
        PMU_PMEVCNTR(13),
        PMU_PMEVCNTR(14),
        PMU_PMEVCNTR(15),
        PMU_PMEVCNTR(16),
        PMU_PMEVCNTR(17),
        PMU_PMEVCNTR(18),
        PMU_PMEVCNTR(19),
        PMU_PMEVCNTR(20),
        PMU_PMEVCNTR(21),
        PMU_PMEVCNTR(22),
        PMU_PMEVCNTR(23),
        PMU_PMEVCNTR(24),
        PMU_PMEVCNTR(25),
        PMU_PMEVCNTR(26),
        PMU_PMEVCNTR(27),
        PMU_PMEVCNTR(28),
        PMU_PMEVCNTR(29),
        PMU_PMEVCNTR(30),
        PMU_PMEVTYPER(0),
        PMU_PMEVTYPER(1),
        PMU_PMEVTYPER(2),
        PMU_PMEVTYPER(3),
        PMU_PMEVTYPER(4),
        PMU_PMEVTYPER(5),
        PMU_PMEVTYPER(6),
        PMU_PMEVTYPER(7),
        PMU_PMEVTYPER(8),
        PMU_PMEVTYPER(9),
        PMU_PMEVTYPER(10),
        PMU_PMEVTYPER(11),
        PMU_PMEVTYPER(12),
        PMU_PMEVTYPER(13),
        PMU_PMEVTYPER(14),
        PMU_PMEVTYPER(15),
        PMU_PMEVTYPER(16),
        PMU_PMEVTYPER(17),
        PMU_PMEVTYPER(18),
        PMU_PMEVTYPER(19),
        PMU_PMEVTYPER(20),
        PMU_PMEVTYPER(21),
        PMU_PMEVTYPER(22),
        PMU_PMEVTYPER(23),
        PMU_PMEVTYPER(24),
        PMU_PMEVTYPER(25),
        PMU_PMEVTYPER(26),
        PMU_PMEVTYPER(27),
        PMU_PMEVTYPER(28),
        PMU_PMEVTYPER(29),
        PMU_PMEVTYPER(30),
        /* PMCCFILTR */
        { Op1(0), CRn(14), CRm(15), Op2(7), access_pmu_evtyper },
};
static const struct sys_reg_desc cp15_64_regs[] = {
        { Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
        { Op1( 0), CRn( 0), CRm( 9), Op2( 0), access_pmu_evcntr },
        { Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi },
        { Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 },
};
/* Target specific emulation tables */
static struct kvm_sys_reg_target_table *target_tables[KVM_ARM_NUM_TARGETS];

void kvm_register_target_sys_reg_table(unsigned int target,
                                       struct kvm_sys_reg_target_table *table)
{
        target_tables[target] = table;
}
/* Get specific register table for this target. */
static const struct sys_reg_desc *get_target_table(unsigned target,
                                                   bool mode_is_64,
                                                   size_t *num)
{
        struct kvm_sys_reg_target_table *table;

        table = target_tables[target];
        if (mode_is_64) {
                *num = table->table64.num;
                return table->table64.table;
        } else {
                *num = table->table32.num;
                return table->table32.table;
        }
}
#define reg_to_match_value(x)                                           \
        ({                                                              \
                unsigned long val;                                      \
                val  = (x)->Op0 << 14;                                  \
                val |= (x)->Op1 << 11;                                  \
                val |= (x)->CRn << 7;                                   \
                val |= (x)->CRm << 3;                                   \
                val |= (x)->Op2;                                        \
                val;                                                    \
         })
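
/*
 * Worked example (informative): SCTLR_EL1 is Op0=3, Op1=0, CRn=1,
 * CRm=0, Op2=0, which packs to (3 << 14) | (1 << 7) == 0xc080. Because
 * the packed field order matches the sort order the tables must obey,
 * bsearch() on this value locates a descriptor in O(log n).
 */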
static int match_sys_reg(const void *key, const void *elt)
{
        const unsigned long pval = (unsigned long)key;
        const struct sys_reg_desc *r = elt;

        return pval - reg_to_match_value(r);
}

static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params,
                                           const struct sys_reg_desc table[],
                                           unsigned int num)
{
        unsigned long pval = reg_to_match_value(params);

        return bsearch((void *)pval, table, num, sizeof(table[0]), match_sys_reg);
}
int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        kvm_inject_undefined(vcpu);
        return 1;
}
/*
 * emulate_cp --  tries to match a sys_reg access in a handling table, and
 *                call the corresponding trap handler.
 *
 * @params: pointer to the descriptor of the access
 * @table: array of trap descriptors
 * @num: size of the trap descriptor array
 *
 * Return 0 if the access has been handled, and -1 if not.
 */
static int emulate_cp(struct kvm_vcpu *vcpu,
                      struct sys_reg_params *params,
                      const struct sys_reg_desc *table,
                      size_t num)
{
        const struct sys_reg_desc *r;

        if (!table)
                return -1;      /* Not handled */

        r = find_reg(params, table, num);

        if (r) {
                /*
                 * Not having an accessor means that we have
                 * configured a trap that we don't know how to
                 * handle. This certainly qualifies as a gross bug
                 * that should be fixed right away.
                 */
                BUG_ON(!r->access);

                if (likely(r->access(vcpu, params, r))) {
                        /* Skip instruction, since it was emulated */
                        kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
                        /* Handled */
                        return 0;
                }
        }

        /* Not handled */
        return -1;
}
static void unhandled_cp_access(struct kvm_vcpu *vcpu,
                                struct sys_reg_params *params)
{
        u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
        int cp = -1;

        switch (hsr_ec) {
        case ESR_ELx_EC_CP15_32:
        case ESR_ELx_EC_CP15_64:
                cp = 15;
                break;
        case ESR_ELx_EC_CP14_MR:
        case ESR_ELx_EC_CP14_64:
                cp = 14;
                break;
        default:
                WARN_ON(1);
        }

        kvm_err("Unsupported guest CP%d access at: %08lx\n",
                cp, *vcpu_pc(vcpu));
        print_sys_reg_instr(params);
        kvm_inject_undefined(vcpu);
}
/**
 * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP14/CP15 access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
                            const struct sys_reg_desc *global,
                            size_t nr_global,
                            const struct sys_reg_desc *target_specific,
                            size_t nr_specific)
{
        struct sys_reg_params params;
        u32 hsr = kvm_vcpu_get_hsr(vcpu);
        int Rt = kvm_vcpu_sys_get_rt(vcpu);
        int Rt2 = (hsr >> 10) & 0x1f;

        params.is_aarch32 = true;
        params.is_32bit = false;
        params.CRm = (hsr >> 1) & 0xf;
        params.is_write = ((hsr & 1) == 0);

        params.Op0 = 0;
        params.Op1 = (hsr >> 16) & 0xf;
        params.Op2 = 0;
        params.CRn = 0;

        /*
         * Make a 64-bit value out of Rt and Rt2. As we use the same trap
         * backends between AArch32 and AArch64, we get away with it.
         */
        if (params.is_write) {
                params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff;
                params.regval |= vcpu_get_reg(vcpu, Rt2) << 32;
        }

        if (!emulate_cp(vcpu, &params, target_specific, nr_specific))
                goto out;
        if (!emulate_cp(vcpu, &params, global, nr_global))
                goto out;

        unhandled_cp_access(vcpu, &params);

out:
        /* Split up the value between registers for the read side */
        if (!params.is_write) {
                vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval));
                vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval));
        }

        return 1;
}
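
/*
 * Worked example (informative): for an mcrr with Rt == 0x12345678 and
 * Rt2 == 0x9abcdef0, params.regval becomes 0x9abcdef012345678; on the
 * read side the same 64-bit value is split back into the two GPRs.
 */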
/**
 * kvm_handle_cp_32 -- handles a mrc/mcr trap on a guest CP14/CP15 access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
                            const struct sys_reg_desc *global,
                            size_t nr_global,
                            const struct sys_reg_desc *target_specific,
                            size_t nr_specific)
{
        struct sys_reg_params params;
        u32 hsr = kvm_vcpu_get_hsr(vcpu);
        int Rt  = kvm_vcpu_sys_get_rt(vcpu);

        params.is_aarch32 = true;
        params.is_32bit = true;
        params.CRm = (hsr >> 1) & 0xf;
        params.regval = vcpu_get_reg(vcpu, Rt);
        params.is_write = ((hsr & 1) == 0);
        params.CRn = (hsr >> 10) & 0xf;
        params.Op0 = 0;
        params.Op1 = (hsr >> 14) & 0x7;
        params.Op2 = (hsr >> 17) & 0x7;

        if (!emulate_cp(vcpu, &params, target_specific, nr_specific) ||
            !emulate_cp(vcpu, &params, global, nr_global)) {
                if (!params.is_write)
                        vcpu_set_reg(vcpu, Rt, params.regval);
                return 1;
        }

        unhandled_cp_access(vcpu, &params);
        return 1;
}
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        const struct sys_reg_desc *target_specific;
        size_t num;

        target_specific = get_target_table(vcpu->arch.target, false, &num);
        return kvm_handle_cp_64(vcpu,
                                cp15_64_regs, ARRAY_SIZE(cp15_64_regs),
                                target_specific, num);
}

int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        const struct sys_reg_desc *target_specific;
        size_t num;

        target_specific = get_target_table(vcpu->arch.target, false, &num);
        return kvm_handle_cp_32(vcpu,
                                cp15_regs, ARRAY_SIZE(cp15_regs),
                                target_specific, num);
}

int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        return kvm_handle_cp_64(vcpu,
                                cp14_64_regs, ARRAY_SIZE(cp14_64_regs),
                                NULL, 0);
}

int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        return kvm_handle_cp_32(vcpu,
                                cp14_regs, ARRAY_SIZE(cp14_regs),
                                NULL, 0);
}
static int emulate_sys_reg(struct kvm_vcpu *vcpu,
                           struct sys_reg_params *params)
{
        size_t num;
        const struct sys_reg_desc *table, *r;

        table = get_target_table(vcpu->arch.target, true, &num);

        /* Search target-specific then generic table. */
        r = find_reg(params, table, num);
        if (!r)
                r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

        if (likely(r)) {
                /*
                 * Not having an accessor means that we have
                 * configured a trap that we don't know how to
                 * handle. This certainly qualifies as a gross bug
                 * that should be fixed right away.
                 */
                BUG_ON(!r->access);

                if (likely(r->access(vcpu, params, r))) {
                        /* Skip instruction, since it was emulated */
                        kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
                        return 1;
                }
                /* If access function fails, it should complain. */
        } else {
                kvm_err("Unsupported guest sys_reg access at: %lx\n",
                        *vcpu_pc(vcpu));
                print_sys_reg_instr(params);
        }
        kvm_inject_undefined(vcpu);
        return 1;
}

static void reset_sys_reg_descs(struct kvm_vcpu *vcpu,
                                const struct sys_reg_desc *table, size_t num)
{
        unsigned long i;

        for (i = 0; i < num; i++)
                if (table[i].reset)
                        table[i].reset(vcpu, &table[i]);
}
/**
 * kvm_handle_sys_reg -- handles a mrs/msr trap on a guest sys_reg access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        struct sys_reg_params params;
        unsigned long esr = kvm_vcpu_get_hsr(vcpu);
        int Rt = kvm_vcpu_sys_get_rt(vcpu);
        int ret;

        trace_kvm_handle_sys_reg(esr);

        params.is_aarch32 = false;
        params.is_32bit = false;
        params.Op0 = (esr >> 20) & 3;
        params.Op1 = (esr >> 14) & 0x7;
        params.CRn = (esr >> 10) & 0xf;
        params.CRm = (esr >> 1) & 0xf;
        params.Op2 = (esr >> 17) & 0x7;
        params.regval = vcpu_get_reg(vcpu, Rt);
        params.is_write = !(esr & 1);

        ret = emulate_sys_reg(vcpu, &params);

        if (!params.is_write)
                vcpu_set_reg(vcpu, Rt, params.regval);
        return ret;
}
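
/*
 * Note (informative): bit 0 of the ESR ISS is the Direction bit (0 for
 * an MSR write, 1 for an MRS read), hence params.is_write = !(esr & 1)
 * above; the Op0/Op1/CRn/CRm/Op2 fields are likewise extracted from
 * their fixed ISS positions.
 */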
/******************************************************************************
 * Userspace API
 *****************************************************************************/
static bool index_to_params(u64 id, struct sys_reg_params *params)
{
        switch (id & KVM_REG_SIZE_MASK) {
        case KVM_REG_SIZE_U64:
                /* Any unused index bits means it's not valid. */
                if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
                              | KVM_REG_ARM_COPROC_MASK
                              | KVM_REG_ARM64_SYSREG_OP0_MASK
                              | KVM_REG_ARM64_SYSREG_OP1_MASK
                              | KVM_REG_ARM64_SYSREG_CRN_MASK
                              | KVM_REG_ARM64_SYSREG_CRM_MASK
                              | KVM_REG_ARM64_SYSREG_OP2_MASK))
                        return false;
                params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK)
                               >> KVM_REG_ARM64_SYSREG_OP0_SHIFT);
                params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK)
                               >> KVM_REG_ARM64_SYSREG_OP1_SHIFT);
                params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK)
                               >> KVM_REG_ARM64_SYSREG_CRN_SHIFT);
                params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK)
                               >> KVM_REG_ARM64_SYSREG_CRM_SHIFT);
                params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK)
                               >> KVM_REG_ARM64_SYSREG_OP2_SHIFT);
                return true;
        default:
                return false;
        }
}
/* Decode an index value, and find the sys_reg_desc entry. */
static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
                                                        u64 id)
{
        size_t num;
        const struct sys_reg_desc *table, *r;
        struct sys_reg_params params;

        /* We only do sys_reg for now. */
        if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
                return NULL;

        if (!index_to_params(id, &params))
                return NULL;

        table = get_target_table(vcpu->arch.target, true, &num);
        r = find_reg(&params, table, num);
        if (!r)
                r = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

        /* Not saved in the sys_reg array? */
        if (r && !r->reg)
                r = NULL;

        return r;
}
/*
 * These are the invariant sys_reg registers: we let the guest see the
 * host versions of these, so they're part of the guest state.
 *
 * A future CPU may provide a mechanism to present different values to
 * the guest, or a future kvm may trap them.
 */

#define FUNCTION_INVARIANT(reg)                                         \
        static void get_##reg(struct kvm_vcpu *v,                       \
                              const struct sys_reg_desc *r)             \
        {                                                               \
                ((struct sys_reg_desc *)r)->val = read_sysreg(reg);     \
        }

FUNCTION_INVARIANT(midr_el1)
FUNCTION_INVARIANT(ctr_el0)
FUNCTION_INVARIANT(revidr_el1)
FUNCTION_INVARIANT(id_pfr0_el1)
FUNCTION_INVARIANT(id_pfr1_el1)
FUNCTION_INVARIANT(id_dfr0_el1)
FUNCTION_INVARIANT(id_afr0_el1)
FUNCTION_INVARIANT(id_mmfr0_el1)
FUNCTION_INVARIANT(id_mmfr1_el1)
FUNCTION_INVARIANT(id_mmfr2_el1)
FUNCTION_INVARIANT(id_mmfr3_el1)
FUNCTION_INVARIANT(id_isar0_el1)
FUNCTION_INVARIANT(id_isar1_el1)
FUNCTION_INVARIANT(id_isar2_el1)
FUNCTION_INVARIANT(id_isar3_el1)
FUNCTION_INVARIANT(id_isar4_el1)
FUNCTION_INVARIANT(id_isar5_el1)
FUNCTION_INVARIANT(clidr_el1)
FUNCTION_INVARIANT(aidr_el1)
/* ->val is filled in by kvm_sys_reg_table_init() */
static struct sys_reg_desc invariant_sys_regs[] = {
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b000),
          NULL, get_midr_el1 },
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b110),
          NULL, get_revidr_el1 },
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b000),
          NULL, get_id_pfr0_el1 },
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b001),
          NULL, get_id_pfr1_el1 },
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b010),
          NULL, get_id_dfr0_el1 },
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b011),
          NULL, get_id_afr0_el1 },
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b100),
          NULL, get_id_mmfr0_el1 },
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b101),
          NULL, get_id_mmfr1_el1 },
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b110),
          NULL, get_id_mmfr2_el1 },
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b111),
          NULL, get_id_mmfr3_el1 },
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b000),
          NULL, get_id_isar0_el1 },
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b001),
          NULL, get_id_isar1_el1 },
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b010),
          NULL, get_id_isar2_el1 },
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b011),
          NULL, get_id_isar3_el1 },
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b100),
          NULL, get_id_isar4_el1 },
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b101),
          NULL, get_id_isar5_el1 },
        { Op0(0b11), Op1(0b001), CRn(0b0000), CRm(0b0000), Op2(0b001),
          NULL, get_clidr_el1 },
        { Op0(0b11), Op1(0b001), CRn(0b0000), CRm(0b0000), Op2(0b111),
          NULL, get_aidr_el1 },
        { Op0(0b11), Op1(0b011), CRn(0b0000), CRm(0b0000), Op2(0b001),
          NULL, get_ctr_el0 },
};
static int reg_from_user(u64 *val, const void __user *uaddr, u64 id)
{
        if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
                return -EFAULT;
        return 0;
}

static int reg_to_user(void __user *uaddr, const u64 *val, u64 id)
{
        if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
                return -EFAULT;
        return 0;
}
static int get_invariant_sys_reg(u64 id, void __user *uaddr)
{
        struct sys_reg_params params;
        const struct sys_reg_desc *r;

        if (!index_to_params(id, &params))
                return -ENOENT;

        r = find_reg(&params, invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs));
        if (!r)
                return -ENOENT;

        return reg_to_user(uaddr, &r->val, id);
}

static int set_invariant_sys_reg(u64 id, void __user *uaddr)
{
        struct sys_reg_params params;
        const struct sys_reg_desc *r;
        int err;
        u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */

        if (!index_to_params(id, &params))
                return -ENOENT;
        r = find_reg(&params, invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs));
        if (!r)
                return -ENOENT;

        err = reg_from_user(&val, uaddr, id);
        if (err)
                return err;

        /* This is what we mean by invariant: you can't change it. */
        if (r->val != val)
                return -EINVAL;

        return 0;
}
static bool is_valid_cache(u32 val)
{
        u32 level, ctype;

        if (val >= CSSELR_MAX)
                return false;

        /* Bottom bit is Instruction or Data bit.  Next 3 bits are level. */
        level = (val >> 1);
        ctype = (cache_levels >> (level * 3)) & 7;

        switch (ctype) {
        case 0: /* No cache */
                return false;
        case 1: /* Instruction cache only */
                return (val & 1);
        case 2: /* Data cache only */
        case 4: /* Unified cache */
                return !(val & 1);
        case 3: /* Separate instruction and data caches */
                return true;
        default: /* Reserved: we can't know instruction or data. */
                return false;
        }
}
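
/*
 * Worked example (informative): if level 1 has separate caches
 * (ctype == 3), both csselr 0 (L1 data) and csselr 1 (L1 instruction)
 * are valid; for a unified level (ctype == 4) only the even, data-side
 * encoding is accepted.
 */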
static int demux_c15_get(u64 id, void __user *uaddr)
{
        u32 val;
        u32 __user *uval = uaddr;

        /* Fail if we have unknown bits set. */
        if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
                   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
                return -ENOENT;

        switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
        case KVM_REG_ARM_DEMUX_ID_CCSIDR:
                if (KVM_REG_SIZE(id) != 4)
                        return -ENOENT;
                val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
                        >> KVM_REG_ARM_DEMUX_VAL_SHIFT;
                if (!is_valid_cache(val))
                        return -ENOENT;

                return put_user(get_ccsidr(val), uval);
        default:
                return -ENOENT;
        }
}

static int demux_c15_set(u64 id, void __user *uaddr)
{
        u32 val, newval;
        u32 __user *uval = uaddr;

        /* Fail if we have unknown bits set. */
        if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
                   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
                return -ENOENT;

        switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
        case KVM_REG_ARM_DEMUX_ID_CCSIDR:
                if (KVM_REG_SIZE(id) != 4)
                        return -ENOENT;
                val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
                        >> KVM_REG_ARM_DEMUX_VAL_SHIFT;
                if (!is_valid_cache(val))
                        return -ENOENT;

                if (get_user(newval, uval))
                        return -EFAULT;

                /* This is also invariant: you can't change it. */
                if (newval != get_ccsidr(val))
                        return -EINVAL;
                return 0;
        default:
                return -ENOENT;
        }
}
int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        const struct sys_reg_desc *r;
        void __user *uaddr = (void __user *)(unsigned long)reg->addr;

        if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
                return demux_c15_get(reg->id, uaddr);

        if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
                return -ENOENT;

        r = index_to_sys_reg_desc(vcpu, reg->id);
        if (!r)
                return get_invariant_sys_reg(reg->id, uaddr);

        if (r->get_user)
                return (r->get_user)(vcpu, r, reg, uaddr);

        return reg_to_user(uaddr, &vcpu_sys_reg(vcpu, r->reg), reg->id);
}

int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        const struct sys_reg_desc *r;
        void __user *uaddr = (void __user *)(unsigned long)reg->addr;

        if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
                return demux_c15_set(reg->id, uaddr);

        if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
                return -ENOENT;

        r = index_to_sys_reg_desc(vcpu, reg->id);
        if (!r)
                return set_invariant_sys_reg(reg->id, uaddr);

        if (r->set_user)
                return (r->set_user)(vcpu, r, reg, uaddr);

        return reg_from_user(&vcpu_sys_reg(vcpu, r->reg), uaddr, reg->id);
}
static unsigned int num_demux_regs(void)
{
        unsigned int i, count = 0;

        for (i = 0; i < CSSELR_MAX; i++)
                if (is_valid_cache(i))
                        count++;

        return count;
}

static int write_demux_regids(u64 __user *uindices)
{
        u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
        unsigned int i;

        val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
        for (i = 0; i < CSSELR_MAX; i++) {
                if (!is_valid_cache(i))
                        continue;
                if (put_user(val | i, uindices))
                        return -EFAULT;
                uindices++;
        }
        return 0;
}
static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
{
        return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
                KVM_REG_ARM64_SYSREG |
                (reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
                (reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
                (reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
                (reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
                (reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT));
}

static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
{
        if (!*uind)
                return true;

        if (put_user(sys_reg_to_index(reg), *uind))
                return false;

        (*uind)++;
        return true;
}
/* Assumed ordered tables, see kvm_sys_reg_table_init. */
static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
{
        const struct sys_reg_desc *i1, *i2, *end1, *end2;
        unsigned int total = 0;
        size_t num;

        /* We check for duplicates here, to allow arch-specific overrides. */
        i1 = get_target_table(vcpu->arch.target, true, &num);
        end1 = i1 + num;
        i2 = sys_reg_descs;
        end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs);

        BUG_ON(i1 == end1 || i2 == end2);

        /* Walk carefully, as both tables may refer to the same register. */
        while (i1 || i2) {
                int cmp = cmp_sys_reg(i1, i2);
                /* target-specific overrides generic entry. */
                if (cmp <= 0) {
                        /* Ignore registers we trap but don't save. */
                        if (i1->reg) {
                                if (!copy_reg_to_user(i1, &uind))
                                        return -EFAULT;
                                total++;
                        }
                } else {
                        /* Ignore registers we trap but don't save. */
                        if (i2->reg) {
                                if (!copy_reg_to_user(i2, &uind))
                                        return -EFAULT;
                                total++;
                        }
                }

                if (cmp <= 0 && ++i1 == end1)
                        i1 = NULL;
                if (cmp >= 0 && ++i2 == end2)
                        i2 = NULL;
        }
        return total;
}
unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
{
        return ARRAY_SIZE(invariant_sys_regs)
                + num_demux_regs()
                + walk_sys_regs(vcpu, (u64 __user *)NULL);
}

int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
        unsigned int i;
        int err;

        /* Then give them all the invariant registers' indices. */
        for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) {
                if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices))
                        return -EFAULT;
                uindices++;
        }

        err = walk_sys_regs(vcpu, uindices);
        if (err < 0)
                return err;
        uindices += err;

        return write_demux_regids(uindices);
}
static int check_sysreg_table(const struct sys_reg_desc *table, unsigned int n)
{
        unsigned int i;

        for (i = 1; i < n; i++) {
                if (cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
                        kvm_err("sys_reg table %p out of order (%d)\n", table, i - 1);
                        return 1;
                }
        }

        return 0;
}
void kvm_sys_reg_table_init(void)
{
        unsigned int i;
        struct sys_reg_desc clidr;

        /* Make sure tables are unique and in order. */
        BUG_ON(check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs)));
        BUG_ON(check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs)));
        BUG_ON(check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs)));
        BUG_ON(check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs)));
        BUG_ON(check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs)));
        BUG_ON(check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs)));

        /* We abuse the reset function to overwrite the table itself. */
        for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
                invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]);

        /*
         * CLIDR format is awkward, so clean it up.  See ARM B4.1.20:
         *
         *   If software reads the Cache Type fields from Ctype1
         *   upwards, once it has seen a value of 0b000, no caches
         *   exist at further-out levels of the hierarchy. So, for
         *   example, if Ctype3 is the first Cache Type field with a
         *   value of 0b000, the values of Ctype4 to Ctype7 must be
         *   ignored.
         */
        get_clidr_el1(NULL, &clidr); /* Ugly... */
        cache_levels = clidr.val;
        for (i = 0; i < 7; i++)
                if (((cache_levels >> (i*3)) & 7) == 0)
                        break;
        /* Clear all higher bits. */
        cache_levels &= (1 << (i*3))-1;
}
/**
 * kvm_reset_sys_regs - sets system registers to reset value
 * @vcpu: The VCPU pointer
 *
 * This function finds the right table above and sets the registers on the
 * virtual CPU struct to their architecturally defined reset values.
 */
void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
{
        size_t num;
        const struct sys_reg_desc *table;

        /* Catch someone adding a register without putting in reset entry. */
        memset(&vcpu->arch.ctxt.sys_regs, 0x42, sizeof(vcpu->arch.ctxt.sys_regs));

        /* Generic chip reset first (so target could override). */
        reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

        table = get_target_table(vcpu->arch.target, true, &num);
        reset_sys_reg_descs(vcpu, table, num);

        for (num = 1; num < NR_SYS_REGS; num++)
                if (vcpu_sys_reg(vcpu, num) == 0x4242424242424242)
                        panic("Didn't reset vcpu_sys_reg(%zi)", num);
}