/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/coproc.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Authors: Rusty Russell <rusty@rustcorp.com.au>
 *          Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/kvm_host.h>
#include <linux/mm.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_host.h>
#include <asm/kvm_mmu.h>

#include <trace/events/kvm.h>

#include "sys_regs.h"

#include "trace.h"

/*
 * All of this file is extremely similar to the ARM coproc.c, but the
 * types are different. My gut feeling is that it should be pretty
 * easy to merge, but that would be an ABI breakage -- again. VFP
 * would also need to be abstracted.
 *
 * For AArch32, we only take care of what is being trapped. Anything
 * that has to do with init and userspace access has to go via the
 * 64bit interface.
 */

/* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
static u32 cache_levels;

/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
#define CSSELR_MAX 12

/* Which cache CCSIDR represents depends on CSSELR value. */
static u32 get_ccsidr(u32 csselr)
{
	u32 ccsidr;

	/* Make sure no one else changes CSSELR during this! */
	local_irq_disable();
	/* Put value into CSSELR */
	asm volatile("msr csselr_el1, %x0" : : "r" (csselr));
	isb();
	/* Read result out of CCSIDR */
	asm volatile("mrs %0, ccsidr_el1" : "=r" (ccsidr));
	local_irq_enable();

	return ccsidr;
}

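/*
 * Note on the CSSELR encoding used above (illustrative): bit 0 selects
 * instruction (1) vs data/unified (0), and bits [3:1] hold the cache
 * level minus one. So csselr = 0 reads the L1 data/unified CCSIDR and
 * csselr = 1 the L1 instruction-side one.
 */
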
/*
 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
 */
static bool access_dcsw(struct kvm_vcpu *vcpu,
			const struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (!p->is_write)
		return read_from_write_only(vcpu, p);

	kvm_set_way_flush(vcpu);
	return true;
}

/*
 * Generic accessor for VM registers. Only called as long as HCR_TVM
 * is set. If the guest enables the MMU, we stop trapping the VM
 * sys_regs and leave it in complete control of the caches.
 */
static bool access_vm_reg(struct kvm_vcpu *vcpu,
			  const struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	unsigned long val;
	bool was_enabled = vcpu_has_cache_enabled(vcpu);

	BUG_ON(!p->is_write);

	val = *vcpu_reg(vcpu, p->Rt);
	if (!p->is_aarch32) {
		vcpu_sys_reg(vcpu, r->reg) = val;
	} else {
		if (!p->is_32bit)
			vcpu_cp15_64_high(vcpu, r->reg) = val >> 32;
		vcpu_cp15_64_low(vcpu, r->reg) = val & 0xffffffffUL;
	}

	kvm_toggle_cache(vcpu, was_enabled);
	return true;
}

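/*
 * Illustrative note on the AArch32 path above: a 64bit access (e.g. an
 * mcrr to TTBR0) fills both the _high and _low halves of the shadow
 * cp15 entry, while a plain 32bit access only updates the _low half.
 */
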
/*
 * Trap handler for the GICv3 SGI generation system register.
 * Forward the request to the VGIC emulation.
 * The cp15_64 code makes sure this automatically works
 * for both AArch64 and AArch32 accesses.
 */
static bool access_gic_sgi(struct kvm_vcpu *vcpu,
			   const struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 val;

	if (!p->is_write)
		return read_from_write_only(vcpu, p);

	val = *vcpu_reg(vcpu, p->Rt);
	vgic_v3_dispatch_sgi(vcpu, val);

	return true;
}

static bool trap_raz_wi(struct kvm_vcpu *vcpu,
			const struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);
	else
		return read_zero(vcpu, p);
}

static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
			   const struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		*vcpu_reg(vcpu, p->Rt) = (1 << 3);
		return true;
	}
}

static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
				   const struct sys_reg_params *p,
				   const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		u32 val;
		asm volatile("mrs %0, dbgauthstatus_el1" : "=r" (val));
		*vcpu_reg(vcpu, p->Rt) = val;
		return true;
	}
}

/*
 * We want to avoid world-switching all the DBG registers all the
 * time:
 *
 * - If we've touched any debug register, it is likely that we're
 *   going to touch more of them. It then makes sense to disable the
 *   traps and start doing the save/restore dance
 * - If debug is active (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), it is
 *   then mandatory to save/restore the registers, as the guest
 *   depends on them.
 *
 * For this, we use a DIRTY bit, indicating the guest has modified the
 * debug registers, used as follows:
 *
 * On guest entry:
 * - If the dirty bit is set (because we're coming back from trapping),
 *   disable the traps, save host registers, restore guest registers.
 * - If debug is actively in use (DBG_MDSCR_KDE or DBG_MDSCR_MDE set),
 *   set the dirty bit, disable the traps, save host registers,
 *   restore guest registers.
 * - Otherwise, enable the traps
 *
 * On guest exit:
 * - If the dirty bit is set, save guest registers, restore host
 *   registers and clear the dirty bit. This ensures that the host can
 *   now use the debug registers.
 */
static bool trap_debug_regs(struct kvm_vcpu *vcpu,
			    const struct sys_reg_params *p,
			    const struct sys_reg_desc *r)
{
	if (p->is_write) {
		vcpu_sys_reg(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt);
		vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
	} else {
		*vcpu_reg(vcpu, p->Rt) = vcpu_sys_reg(vcpu, r->reg);
	}

	trace_trap_reg(__func__, r->reg, p->is_write, *vcpu_reg(vcpu, p->Rt));

	return true;
}

/*
 * reg_to_dbg/dbg_to_reg
 *
 * A 32 bit write to a debug register leaves the top bits alone
 * A 32 bit read from a debug register only returns the bottom bits
 *
 * All writes will set the KVM_ARM64_DEBUG_DIRTY flag to ensure the
 * hyp.S code switches between host and guest values in future.
 */
static inline void reg_to_dbg(struct kvm_vcpu *vcpu,
			      const struct sys_reg_params *p,
			      u64 *dbg_reg)
{
	u64 val = *vcpu_reg(vcpu, p->Rt);

	if (p->is_32bit) {
		val &= 0xffffffffUL;
		val |= ((*dbg_reg >> 32) << 32);
	}

	*dbg_reg = val;
	vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
}

static inline void dbg_to_reg(struct kvm_vcpu *vcpu,
			      const struct sys_reg_params *p,
			      u64 *dbg_reg)
{
	u64 val = *dbg_reg;

	if (p->is_32bit)
		val &= 0xffffffffUL;

	*vcpu_reg(vcpu, p->Rt) = val;
}

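/*
 * Worked example (illustrative values): if a breakpoint value register
 * holds 0xffffffff00000000 and a 32 bit guest writes 0x1234 to it,
 * reg_to_dbg() leaves it holding 0xffffffff00001234; a following
 * 32 bit read through dbg_to_reg() returns just 0x1234.
 */
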
static inline bool trap_bvr(struct kvm_vcpu *vcpu,
			    const struct sys_reg_params *p,
			    const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];

	if (p->is_write)
		reg_to_dbg(vcpu, p, dbg_reg);
	else
		dbg_to_reg(vcpu, p, dbg_reg);

	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);

	return true;
}

static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static inline void reset_bvr(struct kvm_vcpu *vcpu,
			     const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg] = rd->val;
}

static inline bool trap_bcr(struct kvm_vcpu *vcpu,
			    const struct sys_reg_params *p,
			    const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];

	if (p->is_write)
		reg_to_dbg(vcpu, p, dbg_reg);
	else
		dbg_to_reg(vcpu, p, dbg_reg);

	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);

	return true;
}

static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static inline void reset_bcr(struct kvm_vcpu *vcpu,
			     const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg] = rd->val;
}

static inline bool trap_wvr(struct kvm_vcpu *vcpu,
			    const struct sys_reg_params *p,
			    const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];

	if (p->is_write)
		reg_to_dbg(vcpu, p, dbg_reg);
	else
		dbg_to_reg(vcpu, p, dbg_reg);

	trace_trap_reg(__func__, rd->reg, p->is_write,
		vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]);

	return true;
}

static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static inline void reset_wvr(struct kvm_vcpu *vcpu,
			     const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg] = rd->val;
}

static inline bool trap_wcr(struct kvm_vcpu *vcpu,
			    const struct sys_reg_params *p,
			    const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];

	if (p->is_write)
		reg_to_dbg(vcpu, p, dbg_reg);
	else
		dbg_to_reg(vcpu, p, dbg_reg);

	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);

	return true;
}

static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static inline void reset_wcr(struct kvm_vcpu *vcpu,
			     const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg] = rd->val;
}

static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 amair;

	asm volatile("mrs %0, amair_el1\n" : "=r" (amair));
	vcpu_sys_reg(vcpu, AMAIR_EL1) = amair;
}

static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 mpidr;

	/*
	 * Map the vcpu_id into the first three affinity level fields of
	 * the MPIDR. We limit the number of VCPUs in level 0 due to a
	 * limitation to 16 CPUs in that level in the ICC_SGIxR registers
	 * of the GICv3 to be able to address each CPU directly when
	 * sending IPIs.
	 */
	mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0);
	mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1);
	mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2);
	vcpu_sys_reg(vcpu, MPIDR_EL1) = (1ULL << 31) | mpidr;
}

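/*
 * Worked example (illustrative): vcpu_id 21 (0x15) yields Aff0 = 5 and
 * Aff1 = 1, so the guest reads MPIDR_EL1 as (1ULL << 31) | (1 << 8) | 5.
 */
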
/* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
#define DBG_BCR_BVR_WCR_WVR_EL1(n)					\
	/* DBGBVRn_EL1 */						\
	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b100),	\
	  trap_bvr, reset_bvr, n, 0, get_bvr, set_bvr },		\
	/* DBGBCRn_EL1 */						\
	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b101),	\
	  trap_bcr, reset_bcr, n, 0, get_bcr, set_bcr },		\
	/* DBGWVRn_EL1 */						\
	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b110),	\
	  trap_wvr, reset_wvr, n, 0, get_wvr, set_wvr },		\
	/* DBGWCRn_EL1 */						\
	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b111),	\
	  trap_wcr, reset_wcr, n, 0, get_wcr, set_wcr }

/*
 * Architected system registers.
 * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
 *
 * We could trap ID_DFR0 and tell the guest we don't support performance
 * monitoring. Unfortunately the patch to make the kernel check ID_DFR0 was
 * NAKed, so it will read the PMCR anyway.
 *
 * Therefore we tell the guest we have 0 counters. Unfortunately, we
 * must always support PMCCNTR (the cycle counter): we just RAZ/WI for
 * all PM registers, which doesn't crash the guest kernel at least.
 *
 * Debug handling: We do trap most, if not all debug related system
 * registers. The implementation is good enough to ensure that a guest
 * can use these with minimal performance degradation. The drawback is
 * that we don't implement any of the external debug architecture, nor
 * the OSlock protocol. This should be revisited if we ever encounter a
 * more demanding guest...
 */
static const struct sys_reg_desc sys_reg_descs[] = {
	/* DC ISW */
	{ Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b0110), Op2(0b010),
	  access_dcsw },
	/* DC CSW */
	{ Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1010), Op2(0b010),
	  access_dcsw },
	/* DC CISW */
	{ Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b010),
	  access_dcsw },

	DBG_BCR_BVR_WCR_WVR_EL1(0),
	DBG_BCR_BVR_WCR_WVR_EL1(1),
	/* MDCCINT_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b000),
	  trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
	/* MDSCR_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b010),
	  trap_debug_regs, reset_val, MDSCR_EL1, 0 },
	DBG_BCR_BVR_WCR_WVR_EL1(2),
	DBG_BCR_BVR_WCR_WVR_EL1(3),
	DBG_BCR_BVR_WCR_WVR_EL1(4),
	DBG_BCR_BVR_WCR_WVR_EL1(5),
	DBG_BCR_BVR_WCR_WVR_EL1(6),
	DBG_BCR_BVR_WCR_WVR_EL1(7),
	DBG_BCR_BVR_WCR_WVR_EL1(8),
	DBG_BCR_BVR_WCR_WVR_EL1(9),
	DBG_BCR_BVR_WCR_WVR_EL1(10),
	DBG_BCR_BVR_WCR_WVR_EL1(11),
	DBG_BCR_BVR_WCR_WVR_EL1(12),
	DBG_BCR_BVR_WCR_WVR_EL1(13),
	DBG_BCR_BVR_WCR_WVR_EL1(14),
	DBG_BCR_BVR_WCR_WVR_EL1(15),

	/* MDRAR_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000),
	  trap_raz_wi },
	/* OSLAR_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b100),
	  trap_raz_wi },
	/* OSLSR_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0001), Op2(0b100),
	  trap_oslsr_el1 },
	/* OSDLR_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0011), Op2(0b100),
	  trap_raz_wi },
	/* DBGPRCR_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0100), Op2(0b100),
	  trap_raz_wi },
	/* DBGCLAIMSET_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1000), Op2(0b110),
	  trap_raz_wi },
	/* DBGCLAIMCLR_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1001), Op2(0b110),
	  trap_raz_wi },
	/* DBGAUTHSTATUS_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b110),
	  trap_dbgauthstatus_el1 },

	/* MDCCSR_EL0 */
	{ Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0001), Op2(0b000),
	  trap_raz_wi },
	/* DBGDTR_EL0 */
	{ Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0100), Op2(0b000),
	  trap_raz_wi },
	/* DBGDTR[TR]X_EL0 */
	{ Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0101), Op2(0b000),
	  trap_raz_wi },

	/* DBGVCR32_EL2 */
	{ Op0(0b10), Op1(0b100), CRn(0b0000), CRm(0b0111), Op2(0b000),
	  NULL, reset_val, DBGVCR32_EL2, 0 },

	/* MPIDR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b101),
	  NULL, reset_mpidr, MPIDR_EL1 },
	/* SCTLR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000),
	  access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
	/* CPACR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b010),
	  NULL, reset_val, CPACR_EL1, 0 },
	/* TTBR0_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b000),
	  access_vm_reg, reset_unknown, TTBR0_EL1 },
	/* TTBR1_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b001),
	  access_vm_reg, reset_unknown, TTBR1_EL1 },
	/* TCR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b010),
	  access_vm_reg, reset_val, TCR_EL1, 0 },

	/* AFSR0_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b000),
	  access_vm_reg, reset_unknown, AFSR0_EL1 },
	/* AFSR1_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b001),
	  access_vm_reg, reset_unknown, AFSR1_EL1 },
	/* ESR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0010), Op2(0b000),
	  access_vm_reg, reset_unknown, ESR_EL1 },
	/* FAR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0110), CRm(0b0000), Op2(0b000),
	  access_vm_reg, reset_unknown, FAR_EL1 },
	/* PAR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0111), CRm(0b0100), Op2(0b000),
	  NULL, reset_unknown, PAR_EL1 },

	/* PMINTENSET_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b001),
	  trap_raz_wi },
	/* PMINTENCLR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b010),
	  trap_raz_wi },

	/* MAIR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0010), Op2(0b000),
	  access_vm_reg, reset_unknown, MAIR_EL1 },
	/* AMAIR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0011), Op2(0b000),
	  access_vm_reg, reset_amair_el1, AMAIR_EL1 },

	/* VBAR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b0000), Op2(0b000),
	  NULL, reset_val, VBAR_EL1, 0 },

	/* ICC_SGI1R_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1011), Op2(0b101),
	  access_gic_sgi },
	/* ICC_SRE_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1100), Op2(0b101),
	  trap_raz_wi },

	/* CONTEXTIDR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b001),
	  access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
	/* TPIDR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b100),
	  NULL, reset_unknown, TPIDR_EL1 },

	/* CNTKCTL_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1110), CRm(0b0001), Op2(0b000),
	  NULL, reset_val, CNTKCTL_EL1, 0},

	/* CSSELR_EL1 */
	{ Op0(0b11), Op1(0b010), CRn(0b0000), CRm(0b0000), Op2(0b000),
	  NULL, reset_unknown, CSSELR_EL1 },

	/* PMCR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b000),
	  trap_raz_wi },
	/* PMCNTENSET_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b001),
	  trap_raz_wi },
	/* PMCNTENCLR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b010),
	  trap_raz_wi },
	/* PMOVSCLR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b011),
	  trap_raz_wi },
	/* PMSWINC_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b100),
	  trap_raz_wi },
	/* PMSELR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b101),
	  trap_raz_wi },
	/* PMCEID0_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b110),
	  trap_raz_wi },
	/* PMCEID1_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b111),
	  trap_raz_wi },
	/* PMCCNTR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b000),
	  trap_raz_wi },
	/* PMXEVTYPER_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b001),
	  trap_raz_wi },
	/* PMXEVCNTR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b010),
	  trap_raz_wi },
	/* PMUSERENR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b000),
	  trap_raz_wi },
	/* PMOVSSET_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b011),
	  trap_raz_wi },

	/* TPIDR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b010),
	  NULL, reset_unknown, TPIDR_EL0 },
	/* TPIDRRO_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b011),
	  NULL, reset_unknown, TPIDRRO_EL0 },

	/* DACR32_EL2 */
	{ Op0(0b11), Op1(0b100), CRn(0b0011), CRm(0b0000), Op2(0b000),
	  NULL, reset_unknown, DACR32_EL2 },
	/* IFSR32_EL2 */
	{ Op0(0b11), Op1(0b100), CRn(0b0101), CRm(0b0000), Op2(0b001),
	  NULL, reset_unknown, IFSR32_EL2 },
	/* FPEXC32_EL2 */
	{ Op0(0b11), Op1(0b100), CRn(0b0101), CRm(0b0011), Op2(0b000),
	  NULL, reset_val, FPEXC32_EL2, 0x70 },
};

static bool trap_dbgidr(struct kvm_vcpu *vcpu,
			const struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		u64 dfr = read_system_reg(SYS_ID_AA64DFR0_EL1);
		u64 pfr = read_system_reg(SYS_ID_AA64PFR0_EL1);
		u32 el3 = !!cpuid_feature_extract_field(pfr, ID_AA64PFR0_EL3_SHIFT);

		*vcpu_reg(vcpu, p->Rt) = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) |
					  (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) |
					  (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20) |
					  (6 << 16) | (el3 << 14) | (el3 << 12));
		return true;
	}
}

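/*
 * The value trap_dbgidr() builds follows the AArch32 DBGDIDR layout:
 * WRPs[31:28], BRPs[27:24] and CTX_CMPs[23:20] come straight from
 * ID_AA64DFR0_EL1, Version[19:16] is 6 (ARMv8 debug architecture),
 * and the two el3-derived bits advertise the Secure debug components
 * only when EL3 is actually implemented.
 */
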
static bool trap_debug32(struct kvm_vcpu *vcpu,
			 const struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	if (p->is_write) {
		vcpu_cp14(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt);
		vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
	} else {
		*vcpu_reg(vcpu, p->Rt) = vcpu_cp14(vcpu, r->reg);
	}

	return true;
}

/* AArch32 debug register mappings
 *
 * AArch32 DBGBVRn is mapped to DBGBVRn_EL1[31:0]
 * AArch32 DBGBXVRn is mapped to DBGBVRn_EL1[63:32]
 *
 * All control registers and watchpoint value registers are mapped to
 * the lower 32 bits of their AArch64 equivalents. We share the trap
 * handlers with the above AArch64 code which checks what mode the
 * system is in.
 */
static inline bool trap_xvr(struct kvm_vcpu *vcpu,
			    const struct sys_reg_params *p,
			    const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];

	if (p->is_write) {
		u64 val = *dbg_reg;

		val &= 0xffffffffUL;
		val |= *vcpu_reg(vcpu, p->Rt) << 32;
		*dbg_reg = val;

		vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
	} else {
		*vcpu_reg(vcpu, p->Rt) = *dbg_reg >> 32;
	}

	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);

	return true;
}

/* Macro to expand the DBG{BCR,BVR,WVR,WCR}n registers in one go */
#define DBG_BCR_BVR_WCR_WVR(n)						\
	/* DBGBVRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_bvr, NULL, n },	\
	/* DBGBCRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_bcr, NULL, n },	\
	/* DBGWVRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_wvr, NULL, n },	\
	/* DBGWCRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_wcr, NULL, n }

#define DBGBXVR(n)							\
	{ Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_xvr, NULL, n }

/*
 * Trapped cp14 registers. We generally ignore most of the external
 * debug, on the principle that they don't really make sense to a
 * guest. Revisit this one day, should this principle change.
 */
static const struct sys_reg_desc cp14_regs[] = {
	/* DBGIDR */
	{ Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgidr },
	/* DBGDTRRXext */
	{ Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi },

	DBG_BCR_BVR_WCR_WVR(0),
	/* DBGDSCRint */
	{ Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(1),
	/* DBGDCCINT */
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug32 },
	/* DBGDSCRext */
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug32 },
	DBG_BCR_BVR_WCR_WVR(2),
	/* DBGDTR[RT]Xint */
	{ Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi },
	/* DBGDTR[RT]Xext */
	{ Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(3),
	DBG_BCR_BVR_WCR_WVR(4),
	DBG_BCR_BVR_WCR_WVR(5),
	/* DBGWFAR */
	{ Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi },
	/* DBGOSECCR */
	{ Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(6),
	/* DBGVCR */
	{ Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug32 },
	DBG_BCR_BVR_WCR_WVR(7),
	DBG_BCR_BVR_WCR_WVR(8),
	DBG_BCR_BVR_WCR_WVR(9),
	DBG_BCR_BVR_WCR_WVR(10),
	DBG_BCR_BVR_WCR_WVR(11),
	DBG_BCR_BVR_WCR_WVR(12),
	DBG_BCR_BVR_WCR_WVR(13),
	DBG_BCR_BVR_WCR_WVR(14),
	DBG_BCR_BVR_WCR_WVR(15),

	/* DBGDRAR (32bit) */
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi },

	DBGBXVR(0),
	/* DBGOSLAR */
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_raz_wi },
	DBGBXVR(1),
	/* DBGOSLSR */
	{ Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1 },
	DBGBXVR(2),
	DBGBXVR(3),
	/* DBGOSDLR */
	{ Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi },
	DBGBXVR(4),
	/* DBGPRCR */
	{ Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi },
	DBGBXVR(5),
	DBGBXVR(6),
	DBGBXVR(7),
	DBGBXVR(8),
	DBGBXVR(9),
	DBGBXVR(10),
	DBGBXVR(11),
	DBGBXVR(12),
	DBGBXVR(13),
	DBGBXVR(14),
	DBGBXVR(15),

	/* DBGDSAR (32bit) */
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi },

	/* DBGDEVID2 */
	{ Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi },
	/* DBGDEVID1 */
	{ Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi },
	/* DBGDEVID */
	{ Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi },
	/* DBGCLAIMSET */
	{ Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi },
	/* DBGCLAIMCLR */
	{ Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi },
	/* DBGAUTHSTATUS */
	{ Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 },
};

/* Trapped cp14 64bit registers */
static const struct sys_reg_desc cp14_64_regs[] = {
	/* DBGDRAR (64bit) */
	{ Op1( 0), CRm( 1), .access = trap_raz_wi },

	/* DBGDSAR (64bit) */
	{ Op1( 0), CRm( 2), .access = trap_raz_wi },
};

/*
 * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
 * depending on the way they are accessed (as a 32bit or a 64bit
 * register).
 */
static const struct sys_reg_desc cp15_regs[] = {
	{ Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi },

	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, c1_SCTLR },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR },
	{ Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, c3_DACR },
	{ Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, c5_DFSR },
	{ Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, c5_IFSR },
	{ Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, c5_ADFSR },
	{ Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, c5_AIFSR },
	{ Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, c6_DFAR },
	{ Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, c6_IFAR },

	/*
	 * DC{C,I,CI}SW operations:
	 */
	{ Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
	{ Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
	{ Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },

	/* PMU */
	{ Op1( 0), CRn( 9), CRm(12), Op2( 0), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 1), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 2), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 3), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 5), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 6), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 7), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 0), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 1), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 2), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 0), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 1), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 2), trap_raz_wi },

	{ Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, c10_PRRR },
	{ Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR },
	{ Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, c10_AMAIR0 },
	{ Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 },

	/* ICC_SRE */
	{ Op1( 0), CRn(12), CRm(12), Op2( 5), trap_raz_wi },

	{ Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },
};

static const struct sys_reg_desc cp15_64_regs[] = {
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
	{ Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi },
	{ Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 },
};

/* Target specific emulation tables */
static struct kvm_sys_reg_target_table *target_tables[KVM_ARM_NUM_TARGETS];

void kvm_register_target_sys_reg_table(unsigned int target,
				       struct kvm_sys_reg_target_table *table)
{
	target_tables[target] = table;
}

/* Get specific register table for this target. */
static const struct sys_reg_desc *get_target_table(unsigned target,
						   bool mode_is_64,
						   size_t *num)
{
	struct kvm_sys_reg_target_table *table;

	table = target_tables[target];
	if (mode_is_64) {
		*num = table->table64.num;
		return table->table64.table;
	} else {
		*num = table->table32.num;
		return table->table32.table;
	}
}

static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params,
					   const struct sys_reg_desc table[],
					   unsigned int num)
{
	unsigned int i;

	for (i = 0; i < num; i++) {
		const struct sys_reg_desc *r = &table[i];

		if (params->Op0 != r->Op0)
			continue;
		if (params->Op1 != r->Op1)
			continue;
		if (params->CRn != r->CRn)
			continue;
		if (params->CRm != r->CRm)
			continue;
		if (params->Op2 != r->Op2)
			continue;

		return r;
	}
	return NULL;
}

int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

/*
 * emulate_cp -- tries to match a sys_reg access in a handling table, and
 *               call the corresponding trap handler.
 *
 * @params: pointer to the descriptor of the access
 * @table: array of trap descriptors
 * @num: size of the trap descriptor array
 *
 * Return 0 if the access has been handled, and -1 if not.
 */
static int emulate_cp(struct kvm_vcpu *vcpu,
		      const struct sys_reg_params *params,
		      const struct sys_reg_desc *table,
		      size_t num)
{
	const struct sys_reg_desc *r;

	if (!table)
		return -1;	/* Not handled */

	r = find_reg(params, table, num);

	if (r) {
		/*
		 * Not having an accessor means that we have
		 * configured a trap that we don't know how to
		 * handle. This certainly qualifies as a gross bug
		 * that should be fixed right away.
		 */
		BUG_ON(!r->access);

		if (likely(r->access(vcpu, params, r))) {
			/* Skip instruction, since it was emulated */
			kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
		}

		/* Handled */
		return 0;
	}

	/* Not handled */
	return -1;
}

static void unhandled_cp_access(struct kvm_vcpu *vcpu,
				struct sys_reg_params *params)
{
	u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
	int cp;

	switch (hsr_ec) {
	case ESR_ELx_EC_CP15_32:
	case ESR_ELx_EC_CP15_64:
		cp = 15;
		break;
	case ESR_ELx_EC_CP14_MR:
	case ESR_ELx_EC_CP14_64:
		cp = 14;
		break;
	default:
		WARN_ON(1);
	}

	kvm_err("Unsupported guest CP%d access at: %08lx\n",
		cp, *vcpu_pc(vcpu));
	print_sys_reg_instr(params);
	kvm_inject_undefined(vcpu);
}

/**
 * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP14/CP15 access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
			    const struct sys_reg_desc *global,
			    size_t nr_global,
			    const struct sys_reg_desc *target_specific,
			    size_t nr_specific)
{
	struct sys_reg_params params;
	u32 hsr = kvm_vcpu_get_hsr(vcpu);
	int Rt2 = (hsr >> 10) & 0xf;

	params.is_aarch32 = true;
	params.is_32bit = false;
	params.CRm = (hsr >> 1) & 0xf;
	params.Rt = (hsr >> 5) & 0xf;
	params.is_write = ((hsr & 1) == 0);

	params.Op0 = 0;
	params.Op1 = (hsr >> 16) & 0xf;
	params.Op2 = 0;
	params.CRn = 0;

	/*
	 * Massive hack here. Store Rt2 in the top 32bits so we only
	 * have one register to deal with. As we use the same trap
	 * backends between AArch32 and AArch64, we get away with it.
	 */
	if (params.is_write) {
		u64 val = *vcpu_reg(vcpu, params.Rt);
		val &= 0xffffffff;
		val |= *vcpu_reg(vcpu, Rt2) << 32;
		*vcpu_reg(vcpu, params.Rt) = val;
	}

	if (!emulate_cp(vcpu, &params, target_specific, nr_specific))
		goto out;
	if (!emulate_cp(vcpu, &params, global, nr_global))
		goto out;

	unhandled_cp_access(vcpu, &params);

out:
	/* Do the opposite hack for the read side */
	if (!params.is_write) {
		u64 val = *vcpu_reg(vcpu, params.Rt);
		val >>= 32;
		*vcpu_reg(vcpu, Rt2) = val;
	}

	return 1;
}

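/*
 * Illustrative example for the hack above: an AArch32
 * "mcrr p15, 0, r2, r3, c2" (64bit TTBR0 write) traps with Rt = 2 and
 * Rt2 = 3, so kvm_handle_cp_64() packs r3:r2 into one 64bit value
 * before the common backend ever sees it.
 */
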
/**
 * kvm_handle_cp_32 -- handles a mrc/mcr trap on a guest CP14/CP15 access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
			    const struct sys_reg_desc *global,
			    size_t nr_global,
			    const struct sys_reg_desc *target_specific,
			    size_t nr_specific)
{
	struct sys_reg_params params;
	u32 hsr = kvm_vcpu_get_hsr(vcpu);

	params.is_aarch32 = true;
	params.is_32bit = true;
	params.CRm = (hsr >> 1) & 0xf;
	params.Rt = (hsr >> 5) & 0xf;
	params.is_write = ((hsr & 1) == 0);
	params.CRn = (hsr >> 10) & 0xf;
	params.Op0 = 0;
	params.Op1 = (hsr >> 14) & 0x7;
	params.Op2 = (hsr >> 17) & 0x7;

	if (!emulate_cp(vcpu, &params, target_specific, nr_specific))
		return 1;
	if (!emulate_cp(vcpu, &params, global, nr_global))
		return 1;

	unhandled_cp_access(vcpu, &params);
	return 1;
}

int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	const struct sys_reg_desc *target_specific;
	size_t num;

	target_specific = get_target_table(vcpu->arch.target, false, &num);
	return kvm_handle_cp_64(vcpu,
				cp15_64_regs, ARRAY_SIZE(cp15_64_regs),
				target_specific, num);
}

int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	const struct sys_reg_desc *target_specific;
	size_t num;

	target_specific = get_target_table(vcpu->arch.target, false, &num);
	return kvm_handle_cp_32(vcpu,
				cp15_regs, ARRAY_SIZE(cp15_regs),
				target_specific, num);
}

int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	return kvm_handle_cp_64(vcpu,
				cp14_64_regs, ARRAY_SIZE(cp14_64_regs),
				NULL, 0);
}

int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	return kvm_handle_cp_32(vcpu,
				cp14_regs, ARRAY_SIZE(cp14_regs),
				NULL, 0);
}

static int emulate_sys_reg(struct kvm_vcpu *vcpu,
			   const struct sys_reg_params *params)
{
	size_t num;
	const struct sys_reg_desc *table, *r;

	table = get_target_table(vcpu->arch.target, true, &num);

	/* Search target-specific then generic table. */
	r = find_reg(params, table, num);
	if (!r)
		r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	if (likely(r)) {
		/*
		 * Not having an accessor means that we have
		 * configured a trap that we don't know how to
		 * handle. This certainly qualifies as a gross bug
		 * that should be fixed right away.
		 */
		BUG_ON(!r->access);

		if (likely(r->access(vcpu, params, r))) {
			/* Skip instruction, since it was emulated */
			kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
			return 1;
		}
		/* If access function fails, it should complain. */
	} else {
		kvm_err("Unsupported guest sys_reg access at: %lx\n",
			*vcpu_pc(vcpu));
		print_sys_reg_instr(params);
	}
	kvm_inject_undefined(vcpu);
	return 1;
}

static void reset_sys_reg_descs(struct kvm_vcpu *vcpu,
				const struct sys_reg_desc *table, size_t num)
{
	unsigned long i;

	for (i = 0; i < num; i++)
		if (table[i].reset)
			table[i].reset(vcpu, &table[i]);
}

/**
 * kvm_handle_sys_reg -- handles a mrs/msr trap on a guest sys_reg access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct sys_reg_params params;
	unsigned long esr = kvm_vcpu_get_hsr(vcpu);

	trace_kvm_handle_sys_reg(esr);

	params.is_aarch32 = false;
	params.is_32bit = false;
	params.Op0 = (esr >> 20) & 3;
	params.Op1 = (esr >> 14) & 0x7;
	params.CRn = (esr >> 10) & 0xf;
	params.CRm = (esr >> 1) & 0xf;
	params.Op2 = (esr >> 17) & 0x7;
	params.Rt = (esr >> 5) & 0x1f;
	params.is_write = !(esr & 1);

	return emulate_sys_reg(vcpu, &params);
}

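/*
 * Decode example (illustrative): a guest "mrs x3, sctlr_el1" traps
 * with ISS fields Op0=3, Op1=0, CRn=1, CRm=0, Op2=0, Rt=3 and the
 * direction bit (bit 0) set, so params.is_write ends up false.
 */
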
/******************************************************************************
 * Userspace API
 *****************************************************************************/

static bool index_to_params(u64 id, struct sys_reg_params *params)
{
	switch (id & KVM_REG_SIZE_MASK) {
	case KVM_REG_SIZE_U64:
		/* Any unused index bits means it's not valid. */
		if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
			      | KVM_REG_ARM_COPROC_MASK
			      | KVM_REG_ARM64_SYSREG_OP0_MASK
			      | KVM_REG_ARM64_SYSREG_OP1_MASK
			      | KVM_REG_ARM64_SYSREG_CRN_MASK
			      | KVM_REG_ARM64_SYSREG_CRM_MASK
			      | KVM_REG_ARM64_SYSREG_OP2_MASK))
			return false;
		params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP0_SHIFT);
		params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP1_SHIFT);
		params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK)
			       >> KVM_REG_ARM64_SYSREG_CRN_SHIFT);
		params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK)
			       >> KVM_REG_ARM64_SYSREG_CRM_SHIFT);
		params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP2_SHIFT);
		return true;
	default:
		return false;
	}
}

/* Decode an index value, and find the sys_reg_desc entry. */
static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
						    u64 id)
{
	size_t num;
	const struct sys_reg_desc *table, *r;
	struct sys_reg_params params;

	/* We only do sys_reg for now. */
	if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
		return NULL;

	if (!index_to_params(id, &params))
		return NULL;

	table = get_target_table(vcpu->arch.target, true, &num);
	r = find_reg(&params, table, num);
	if (!r)
		r = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	/* Not saved in the sys_reg array? */
	if (r && !r->reg)
		r = NULL;

	return r;
}

/*
 * These are the invariant sys_reg registers: we let the guest see the
 * host versions of these, so they're part of the guest state.
 *
 * A future CPU may provide a mechanism to present different values to
 * the guest, or a future kvm may trap them.
 */

#define FUNCTION_INVARIANT(reg)						\
	static void get_##reg(struct kvm_vcpu *v,			\
			      const struct sys_reg_desc *r)		\
	{								\
		u64 val;						\
									\
		asm volatile("mrs %0, " __stringify(reg) "\n"		\
			     : "=r" (val));				\
		((struct sys_reg_desc *)r)->val = val;			\
	}

FUNCTION_INVARIANT(midr_el1)
FUNCTION_INVARIANT(ctr_el0)
FUNCTION_INVARIANT(revidr_el1)
FUNCTION_INVARIANT(id_pfr0_el1)
FUNCTION_INVARIANT(id_pfr1_el1)
FUNCTION_INVARIANT(id_dfr0_el1)
FUNCTION_INVARIANT(id_afr0_el1)
FUNCTION_INVARIANT(id_mmfr0_el1)
FUNCTION_INVARIANT(id_mmfr1_el1)
FUNCTION_INVARIANT(id_mmfr2_el1)
FUNCTION_INVARIANT(id_mmfr3_el1)
FUNCTION_INVARIANT(id_isar0_el1)
FUNCTION_INVARIANT(id_isar1_el1)
FUNCTION_INVARIANT(id_isar2_el1)
FUNCTION_INVARIANT(id_isar3_el1)
FUNCTION_INVARIANT(id_isar4_el1)
FUNCTION_INVARIANT(id_isar5_el1)
FUNCTION_INVARIANT(clidr_el1)
FUNCTION_INVARIANT(aidr_el1)

/* ->val is filled in by kvm_sys_reg_table_init() */
static struct sys_reg_desc invariant_sys_regs[] = {
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b000),
	  NULL, get_midr_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b110),
	  NULL, get_revidr_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b000),
	  NULL, get_id_pfr0_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b001),
	  NULL, get_id_pfr1_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b010),
	  NULL, get_id_dfr0_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b011),
	  NULL, get_id_afr0_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b100),
	  NULL, get_id_mmfr0_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b101),
	  NULL, get_id_mmfr1_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b110),
	  NULL, get_id_mmfr2_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b111),
	  NULL, get_id_mmfr3_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b000),
	  NULL, get_id_isar0_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b001),
	  NULL, get_id_isar1_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b010),
	  NULL, get_id_isar2_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b011),
	  NULL, get_id_isar3_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b100),
	  NULL, get_id_isar4_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b101),
	  NULL, get_id_isar5_el1 },
	{ Op0(0b11), Op1(0b001), CRn(0b0000), CRm(0b0000), Op2(0b001),
	  NULL, get_clidr_el1 },
	{ Op0(0b11), Op1(0b001), CRn(0b0000), CRm(0b0000), Op2(0b111),
	  NULL, get_aidr_el1 },
	{ Op0(0b11), Op1(0b011), CRn(0b0000), CRm(0b0000), Op2(0b001),
	  NULL, get_ctr_el0 },
};

static int reg_from_user(u64 *val, const void __user *uaddr, u64 id)
{
	if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
		return -EFAULT;
	return 0;
}

static int reg_to_user(void __user *uaddr, const u64 *val, u64 id)
{
	if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_invariant_sys_reg(u64 id, void __user *uaddr)
{
	struct sys_reg_params params;
	const struct sys_reg_desc *r;

	if (!index_to_params(id, &params))
		return -ENOENT;

	r = find_reg(&params, invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs));
	if (!r)
		return -ENOENT;

	return reg_to_user(uaddr, &r->val, id);
}

static int set_invariant_sys_reg(u64 id, void __user *uaddr)
{
	struct sys_reg_params params;
	const struct sys_reg_desc *r;
	int err;
	u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */

	if (!index_to_params(id, &params))
		return -ENOENT;
	r = find_reg(&params, invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs));
	if (!r)
		return -ENOENT;

	err = reg_from_user(&val, uaddr, id);
	if (err)
		return err;

	/* This is what we mean by invariant: you can't change it. */
	if (r->val != val)
		return -EINVAL;

	return 0;
}

static bool is_valid_cache(u32 val)
{
	u32 level, ctype;

	if (val >= CSSELR_MAX)
		return false;

	/* Bottom bit is Instruction or Data bit.  Next 3 bits are level. */
	level = (val >> 1);
	ctype = (cache_levels >> (level * 3)) & 7;

	switch (ctype) {
	case 0: /* No cache */
		return false;
	case 1: /* Instruction cache only */
		return (val & 1);
	case 2: /* Data cache only */
	case 4: /* Unified cache */
		return !(val & 1);
	case 3: /* Separate instruction and data caches */
		return true;
	default: /* Reserved: we can't know instruction or data. */
		return false;
	}
}

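/*
 * Worked example (illustrative): cache_levels = 0x23 describes separate
 * L1 I/D caches (ctype 3) and a unified L2 (ctype 4). CSSELR values
 * 0 (L1 D), 1 (L1 I) and 2 (L2) are then valid, while 3 (a non-existent
 * L2 I-cache) is not.
 */
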
static int demux_c15_get(u64 id, void __user *uaddr)
{
	u32 val;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (!is_valid_cache(val))
			return -ENOENT;

		return put_user(get_ccsidr(val), uval);
	default:
		return -ENOENT;
	}
}

static int demux_c15_set(u64 id, void __user *uaddr)
{
	u32 val, newval;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (!is_valid_cache(val))
			return -ENOENT;

		if (get_user(newval, uval))
			return -EFAULT;

		/* This is also invariant: you can't change it. */
		if (newval != get_ccsidr(val))
			return -EINVAL;
		return 0;
	default:
		return -ENOENT;
	}
}

int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	const struct sys_reg_desc *r;
	void __user *uaddr = (void __user *)(unsigned long)reg->addr;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_get(reg->id, uaddr);

	if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
		return -ENOENT;

	r = index_to_sys_reg_desc(vcpu, reg->id);
	if (!r)
		return get_invariant_sys_reg(reg->id, uaddr);

	if (r->get_user)
		return (r->get_user)(vcpu, r, reg, uaddr);

	return reg_to_user(uaddr, &vcpu_sys_reg(vcpu, r->reg), reg->id);
}

int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	const struct sys_reg_desc *r;
	void __user *uaddr = (void __user *)(unsigned long)reg->addr;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_set(reg->id, uaddr);

	if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
		return -ENOENT;

	r = index_to_sys_reg_desc(vcpu, reg->id);
	if (!r)
		return set_invariant_sys_reg(reg->id, uaddr);

	if (r->set_user)
		return (r->set_user)(vcpu, r, reg, uaddr);

	return reg_from_user(&vcpu_sys_reg(vcpu, r->reg), uaddr, reg->id);
}

static unsigned int num_demux_regs(void)
{
	unsigned int i, count = 0;

	for (i = 0; i < CSSELR_MAX; i++)
		if (is_valid_cache(i))
			count++;

	return count;
}

static int write_demux_regids(u64 __user *uindices)
{
	u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
	unsigned int i;

	val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
	for (i = 0; i < CSSELR_MAX; i++) {
		if (!is_valid_cache(i))
			continue;
		if (put_user(val | i, uindices))
			return -EFAULT;
		uindices++;
	}
	return 0;
}

static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
{
	return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
		KVM_REG_ARM64_SYSREG |
		(reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
		(reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
		(reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
		(reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
		(reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT));
}

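/*
 * For example (illustrative): SCTLR_EL1 (Op0=3, Op1=0, CRn=1, CRm=0,
 * Op2=0) maps to KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM64_SYSREG
 * | (3 << KVM_REG_ARM64_SYSREG_OP0_SHIFT)
 * | (1 << KVM_REG_ARM64_SYSREG_CRN_SHIFT).
 */
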
static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
{
	if (!*uind)
		return true;

	if (put_user(sys_reg_to_index(reg), *uind))
		return false;

	(*uind)++;
	return true;
}

1618 static int walk_sys_regs(struct kvm_vcpu
*vcpu
, u64 __user
*uind
)
1620 const struct sys_reg_desc
*i1
, *i2
, *end1
, *end2
;
1621 unsigned int total
= 0;
1624 /* We check for duplicates here, to allow arch-specific overrides. */
1625 i1
= get_target_table(vcpu
->arch
.target
, true, &num
);
1628 end2
= sys_reg_descs
+ ARRAY_SIZE(sys_reg_descs
);
1630 BUG_ON(i1
== end1
|| i2
== end2
);
1632 /* Walk carefully, as both tables may refer to the same register. */
1634 int cmp
= cmp_sys_reg(i1
, i2
);
1635 /* target-specific overrides generic entry. */
1637 /* Ignore registers we trap but don't save. */
1639 if (!copy_reg_to_user(i1
, &uind
))
1644 /* Ignore registers we trap but don't save. */
1646 if (!copy_reg_to_user(i2
, &uind
))
1652 if (cmp
<= 0 && ++i1
== end1
)
1654 if (cmp
>= 0 && ++i2
== end2
)
unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
{
	return ARRAY_SIZE(invariant_sys_regs)
		+ num_demux_regs()
		+ walk_sys_regs(vcpu, (u64 __user *)NULL);
}

int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	unsigned int i;
	int err;

	/* Then give them all the invariant registers' indices. */
	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) {
		if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices))
			return -EFAULT;
		uindices++;
	}

	err = walk_sys_regs(vcpu, uindices);
	if (err < 0)
		return err;
	uindices += err;

	return write_demux_regids(uindices);
}

static int check_sysreg_table(const struct sys_reg_desc *table, unsigned int n)
{
	unsigned int i;

	for (i = 1; i < n; i++) {
		if (cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
			kvm_err("sys_reg table %p out of order (%d)\n", table, i - 1);
			return 1;
		}
	}

	return 0;
}

void kvm_sys_reg_table_init(void)
{
	unsigned int i;
	struct sys_reg_desc clidr;

	/* Make sure tables are unique and in order. */
	BUG_ON(check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs)));
	BUG_ON(check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs)));
	BUG_ON(check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs)));
	BUG_ON(check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs)));
	BUG_ON(check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs)));
	BUG_ON(check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs)));

	/* We abuse the reset function to overwrite the table itself. */
	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
		invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]);

	/*
	 * CLIDR format is awkward, so clean it up.  See ARM B4.1.20:
	 *
	 *   If software reads the Cache Type fields from Ctype1
	 *   upwards, once it has seen a value of 0b000, no caches
	 *   exist at further-out levels of the hierarchy. So, for
	 *   example, if Ctype3 is the first Cache Type field with a
	 *   value of 0b000, the values of Ctype4 to Ctype7 must be
	 *   ignored.
	 */
	get_clidr_el1(NULL, &clidr); /* Ugly... */
	cache_levels = clidr.val;
	for (i = 0; i < 7; i++)
		if (((cache_levels >> (i*3)) & 7) == 0)
			break;
	/* Clear all higher bits. */
	cache_levels &= (1 << (i*3))-1;
}

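/*
 * Worked example (illustrative): a Cortex-A53 style CLIDR of 0x0a200023
 * has Ctype1 = 0b011, Ctype2 = 0b100 and Ctype3 = 0b000, so the loop in
 * kvm_sys_reg_table_init() stops at i = 2 and the final mask leaves
 * cache_levels = 0b100011, dropping the LoUU/LoC/LoUIS fields above.
 */
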
/**
 * kvm_reset_sys_regs - sets system registers to reset value
 * @vcpu: The VCPU pointer
 *
 * This function finds the right table above and sets the registers on the
 * virtual CPU struct to their architecturally defined reset values.
 */
void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
{
	size_t num;
	const struct sys_reg_desc *table;

	/* Catch someone adding a register without putting in reset entry. */
	memset(&vcpu->arch.ctxt.sys_regs, 0x42, sizeof(vcpu->arch.ctxt.sys_regs));

	/* Generic chip reset first (so target could override). */
	reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	table = get_target_table(vcpu->arch.target, true, &num);
	reset_sys_reg_descs(vcpu, table, num);

	for (num = 1; num < NR_SYS_REGS; num++)
		if (vcpu_sys_reg(vcpu, num) == 0x4242424242424242)
			panic("Didn't reset vcpu_sys_reg(%zi)", num);
}