/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/coproc.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Authors: Rusty Russell <rusty@rustcorp.com.au>
 *          Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/mm.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_mmu.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/debug-monitors.h>
#include <trace/events/kvm.h>

#include "sys_regs.h"

/*
 * All of this file is extremely similar to the ARM coproc.c, but the
 * types are different. My gut feeling is that it should be pretty
 * easy to merge, but that would be an ABI breakage -- again. VFP
 * would also need to be abstracted.
 *
 * For AArch32, we only take care of what is being trapped. Anything
 * that has to do with init and userspace access has to go via the
 * 64bit interface.
 */

/* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
static u32 cache_levels;

/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
#define CSSELR_MAX 12

/* Which cache CCSIDR represents depends on CSSELR value. */
static u32 get_ccsidr(u32 csselr)
{
	u32 ccsidr;

	/* Make sure no one else changes CSSELR during this! */
	local_irq_disable();
	/* Put value into CSSELR */
	asm volatile("msr csselr_el1, %x0" : : "r" (csselr));
	isb();
	/* Read result out of CCSIDR */
	asm volatile("mrs %0, ccsidr_el1" : "=r" (ccsidr));
	local_irq_enable();

	return ccsidr;
}
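
/*
 * Illustration (a sketch, not part of the original file): CSSELR_EL1
 * selects a cache as (level << 1) | InD, and the CCSIDR_EL1 value read
 * back describes that cache's geometry. Assuming the ARMv8 CCSIDR
 * layout (LineSize in bits [2:0], Associativity in [12:3], NumSets in
 * [27:13]), the size of the L1 data/unified cache could be recovered
 * like this:
 *
 *	u32 ccsidr = get_ccsidr(0);			// level 1, data/unified
 *	u32 line_size = 1 << ((ccsidr & 7) + 4);	// bytes per line
 *	u32 assoc = ((ccsidr >> 3) & 0x3ff) + 1;	// number of ways
 *	u32 nsets = ((ccsidr >> 13) & 0x7fff) + 1;	// number of sets
 *	u32 size = line_size * assoc * nsets;		// total bytes
 */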

static void do_dc_cisw(u32 val)
{
	asm volatile("dc cisw, %x0" : : "r" (val));
	dsb(ish);
}

static void do_dc_csw(u32 val)
{
	asm volatile("dc csw, %x0" : : "r" (val));
	dsb(ish);
}

/* See note at ARM ARM B1.14.4 */
static bool access_dcsw(struct kvm_vcpu *vcpu,
			const struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	unsigned long val;
	int cpu;

	if (!p->is_write)
		return read_from_write_only(vcpu, p);

	cpu = get_cpu();

	cpumask_setall(&vcpu->arch.require_dcache_flush);
	cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush);

	/* If we were already preempted, take the long way around */
	if (cpu != vcpu->arch.last_pcpu) {
		flush_cache_all();
		goto done;
	}

	val = *vcpu_reg(vcpu, p->Rt);

	switch (p->CRm) {
	case 6:			/* Upgrade DCISW to DCCISW, as per HCR.SWIO */
	case 14:		/* DCCISW */
		do_dc_cisw(val);
		break;

	case 10:		/* DCCSW */
		do_dc_csw(val);
		break;
	}

done:
	put_cpu();

	return true;
}
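
/*
 * Illustration (a sketch based on the ARMv8 set/way operand encoding,
 * not text from the original file): the Rt value handed to DC C(I)SW
 * packs the cache level into bits [3:1] as (level - 1) << 1, the way
 * index left-justified at bit (32 - log2(associativity)), and the set
 * index starting at bit log2(line size in bytes). So cleaning set 0,
 * way 0 of the L1 cache is simply:
 *
 *	do_dc_csw(0);
 */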

/*
 * Generic accessor for VM registers. Only called as long as HCR_TVM
 * is set.
 */
static bool access_vm_reg(struct kvm_vcpu *vcpu,
			  const struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	unsigned long val;

	BUG_ON(!p->is_write);

	val = *vcpu_reg(vcpu, p->Rt);
	if (!p->is_aarch32 || !p->is_32bit)
		vcpu_sys_reg(vcpu, r->reg) = val;
	else
		vcpu_cp15_64_low(vcpu, r->reg) = val & 0xffffffffUL;

	return true;
}

/*
 * SCTLR_EL1 accessor. Only called as long as HCR_TVM is set. If the
 * guest enables the MMU, we stop trapping the VM sys_regs and leave
 * it in complete control of the caches.
 */
static bool access_sctlr(struct kvm_vcpu *vcpu,
			 const struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	access_vm_reg(vcpu, p, r);

	if (vcpu_has_cache_enabled(vcpu)) {	/* MMU+Caches enabled? */
		vcpu->arch.hcr_el2 &= ~HCR_TVM;
		stage2_flush_vm(vcpu->kvm);
	}

	return true;
}

static bool trap_raz_wi(struct kvm_vcpu *vcpu,
			const struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);
	else
		return read_zero(vcpu, p);
}

static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
			   const struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		*vcpu_reg(vcpu, p->Rt) = (1 << 3);
		return true;
	}
}

static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
				   const struct sys_reg_params *p,
				   const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		u32 val;
		asm volatile("mrs %0, dbgauthstatus_el1" : "=r" (val));
		*vcpu_reg(vcpu, p->Rt) = val;
		return true;
	}
}

/*
 * We want to avoid world-switching all the DBG registers all the
 * time:
 *
 * - If we've touched any debug register, it is likely that we're
 *   going to touch more of them. It then makes sense to disable the
 *   traps and start doing the save/restore dance.
 * - If debug is active (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), it is
 *   then mandatory to save/restore the registers, as the guest
 *   depends on them.
 *
 * For this, we use a DIRTY bit, indicating that the guest has modified
 * the debug registers, used as follows:
 *
 * On guest entry:
 * - If the dirty bit is set (because we're coming back from trapping),
 *   disable the traps, save host registers, restore guest registers.
 * - If debug is actively in use (DBG_MDSCR_KDE or DBG_MDSCR_MDE set),
 *   set the dirty bit, disable the traps, save host registers,
 *   restore guest registers.
 * - Otherwise, enable the traps.
 *
 * On guest exit:
 * - If the dirty bit is set, save guest registers, restore host
 *   registers and clear the dirty bit. This ensures that the host can
 *   now use the debug registers.
 */
static bool trap_debug_regs(struct kvm_vcpu *vcpu,
			    const struct sys_reg_params *p,
			    const struct sys_reg_desc *r)
{
	if (p->is_write) {
		vcpu_sys_reg(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt);
		vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
	} else {
		*vcpu_reg(vcpu, p->Rt) = vcpu_sys_reg(vcpu, r->reg);
	}

	return true;
}

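/*
 * Entry-time decision sketch (illustrative only; the real logic lives
 * in the debug world-switch code, not in this file):
 *
 *	if (vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY) {
 *		// Back from a trapped access: disable the traps and
 *		// do the full save/restore dance.
 *	} else if (vcpu_sys_reg(vcpu, MDSCR_EL1) &
 *		   (DBG_MDSCR_KDE | DBG_MDSCR_MDE)) {
 *		vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
 *		// Debug actively in use: same treatment.
 *	} else {
 *		// Keep the traps enabled; trap_debug_regs() above
 *		// sets the dirty bit on the first guest write.
 *	}
 */
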
static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 amair;

	asm volatile("mrs %0, amair_el1\n" : "=r" (amair));
	vcpu_sys_reg(vcpu, AMAIR_EL1) = amair;
}

static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	/*
	 * Simply map the vcpu_id into the Aff0 field of the MPIDR.
	 */
	vcpu_sys_reg(vcpu, MPIDR_EL1) = (1UL << 31) | (vcpu->vcpu_id & 0xff);
}

/* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
#define DBG_BCR_BVR_WCR_WVR_EL1(n)					\
	/* DBGBVRn_EL1 */						\
	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b100),	\
	  trap_debug_regs, reset_val, (DBGBVR0_EL1 + (n)), 0 },		\
	/* DBGBCRn_EL1 */						\
	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b101),	\
	  trap_debug_regs, reset_val, (DBGBCR0_EL1 + (n)), 0 },		\
	/* DBGWVRn_EL1 */						\
	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b110),	\
	  trap_debug_regs, reset_val, (DBGWVR0_EL1 + (n)), 0 },		\
	/* DBGWCRn_EL1 */						\
	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b111),	\
	  trap_debug_regs, reset_val, (DBGWCR0_EL1 + (n)), 0 }

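/*
 * For illustration, DBG_BCR_BVR_WCR_WVR_EL1(1) expands to the four
 * descriptors for DBGBVR1_EL1, DBGBCR1_EL1, DBGWVR1_EL1 and
 * DBGWCR1_EL1: Op0=0b10, Op1=0b000, CRn=0b0000, CRm=0b0001, with Op2
 * running 0b100..0b111, each trapped by trap_debug_regs() and reset
 * to 0.
 */
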
/*
 * Architected system registers.
 * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
 *
 * We could trap ID_DFR0 and tell the guest we don't support performance
 * monitoring. Unfortunately the patch to make the kernel check ID_DFR0 was
 * NAKed, so it will read the PMCR anyway.
 *
 * Therefore we tell the guest we have 0 counters. Unfortunately, we
 * must always support PMCCNTR (the cycle counter): we just RAZ/WI for
 * all PM registers, which doesn't crash the guest kernel at least.
 *
 * Debug handling: We do trap most, if not all, debug-related system
 * registers. The implementation is good enough to ensure that a guest
 * can use these with minimal performance degradation. The drawback is
 * that we don't implement any of the external debug architecture, nor
 * the OSLock protocol. This should be revisited if we ever encounter a
 * more demanding guest...
 */
static const struct sys_reg_desc sys_reg_descs[] = {
	/* DC ISW */
	{ Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b0110), Op2(0b010),
	  access_dcsw },
	/* DC CSW */
	{ Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1010), Op2(0b010),
	  access_dcsw },
	/* DC CISW */
	{ Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b010),
	  access_dcsw },

	DBG_BCR_BVR_WCR_WVR_EL1(0),
	DBG_BCR_BVR_WCR_WVR_EL1(1),
	/* MDCCINT_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b000),
	  trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
	/* MDSCR_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b010),
	  trap_debug_regs, reset_val, MDSCR_EL1, 0 },
	DBG_BCR_BVR_WCR_WVR_EL1(2),
	DBG_BCR_BVR_WCR_WVR_EL1(3),
	DBG_BCR_BVR_WCR_WVR_EL1(4),
	DBG_BCR_BVR_WCR_WVR_EL1(5),
	DBG_BCR_BVR_WCR_WVR_EL1(6),
	DBG_BCR_BVR_WCR_WVR_EL1(7),
	DBG_BCR_BVR_WCR_WVR_EL1(8),
	DBG_BCR_BVR_WCR_WVR_EL1(9),
	DBG_BCR_BVR_WCR_WVR_EL1(10),
	DBG_BCR_BVR_WCR_WVR_EL1(11),
	DBG_BCR_BVR_WCR_WVR_EL1(12),
	DBG_BCR_BVR_WCR_WVR_EL1(13),
	DBG_BCR_BVR_WCR_WVR_EL1(14),
	DBG_BCR_BVR_WCR_WVR_EL1(15),

	/* MDRAR_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000),
	  trap_raz_wi },
	/* OSLAR_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b100),
	  trap_raz_wi },
	/* OSLSR_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0001), Op2(0b100),
	  trap_oslsr_el1 },
	/* OSDLR_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0011), Op2(0b100),
	  trap_raz_wi },
	/* DBGPRCR_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0100), Op2(0b100),
	  trap_raz_wi },
	/* DBGCLAIMSET_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1000), Op2(0b110),
	  trap_raz_wi },
	/* DBGCLAIMCLR_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1001), Op2(0b110),
	  trap_raz_wi },
	/* DBGAUTHSTATUS_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b110),
	  trap_dbgauthstatus_el1 },

	/* TEECR32_EL1 */
	{ Op0(0b10), Op1(0b010), CRn(0b0000), CRm(0b0000), Op2(0b000),
	  NULL, reset_val, TEECR32_EL1, 0 },
	/* TEEHBR32_EL1 */
	{ Op0(0b10), Op1(0b010), CRn(0b0001), CRm(0b0000), Op2(0b000),
	  NULL, reset_val, TEEHBR32_EL1, 0 },

	/* MDCCSR_EL1 */
	{ Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0001), Op2(0b000),
	  trap_raz_wi },
	/* DBGDTR_EL0 */
	{ Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0100), Op2(0b000),
	  trap_raz_wi },
	/* DBGDTR[TR]X_EL0 */
	{ Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0101), Op2(0b000),
	  trap_raz_wi },

	/* DBGVCR32_EL2 */
	{ Op0(0b10), Op1(0b100), CRn(0b0000), CRm(0b0111), Op2(0b000),
	  NULL, reset_val, DBGVCR32_EL2, 0 },

	/* MPIDR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b101),
	  NULL, reset_mpidr, MPIDR_EL1 },
	/* SCTLR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000),
	  access_sctlr, reset_val, SCTLR_EL1, 0x00C50078 },
	/* CPACR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b010),
	  NULL, reset_val, CPACR_EL1, 0 },
	/* TTBR0_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b000),
	  access_vm_reg, reset_unknown, TTBR0_EL1 },
	/* TTBR1_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b001),
	  access_vm_reg, reset_unknown, TTBR1_EL1 },
	/* TCR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b010),
	  access_vm_reg, reset_val, TCR_EL1, 0 },

	/* AFSR0_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b000),
	  access_vm_reg, reset_unknown, AFSR0_EL1 },
	/* AFSR1_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b001),
	  access_vm_reg, reset_unknown, AFSR1_EL1 },
	/* ESR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0010), Op2(0b000),
	  access_vm_reg, reset_unknown, ESR_EL1 },
	/* FAR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0110), CRm(0b0000), Op2(0b000),
	  access_vm_reg, reset_unknown, FAR_EL1 },
	/* PAR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0111), CRm(0b0100), Op2(0b000),
	  NULL, reset_unknown, PAR_EL1 },

	/* PMINTENSET_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b001),
	  trap_raz_wi },
	/* PMINTENCLR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b010),
	  trap_raz_wi },

	/* MAIR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0010), Op2(0b000),
	  access_vm_reg, reset_unknown, MAIR_EL1 },
	/* AMAIR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0011), Op2(0b000),
	  access_vm_reg, reset_amair_el1, AMAIR_EL1 },

	/* VBAR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b0000), Op2(0b000),
	  NULL, reset_val, VBAR_EL1, 0 },
	/* CONTEXTIDR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b001),
	  access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
	/* TPIDR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b100),
	  NULL, reset_unknown, TPIDR_EL1 },

	/* CNTKCTL_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1110), CRm(0b0001), Op2(0b000),
	  NULL, reset_val, CNTKCTL_EL1, 0 },

	/* CSSELR_EL1 */
	{ Op0(0b11), Op1(0b010), CRn(0b0000), CRm(0b0000), Op2(0b000),
	  NULL, reset_unknown, CSSELR_EL1 },

	/* PMCR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b000),
	  trap_raz_wi },
	/* PMCNTENSET_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b001),
	  trap_raz_wi },
	/* PMCNTENCLR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b010),
	  trap_raz_wi },
	/* PMOVSCLR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b011),
	  trap_raz_wi },
	/* PMSWINC_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b100),
	  trap_raz_wi },
	/* PMSELR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b101),
	  trap_raz_wi },
	/* PMCEID0_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b110),
	  trap_raz_wi },
	/* PMCEID1_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b111),
	  trap_raz_wi },
	/* PMCCNTR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b000),
	  trap_raz_wi },
	/* PMXEVTYPER_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b001),
	  trap_raz_wi },
	/* PMXEVCNTR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b010),
	  trap_raz_wi },
	/* PMUSERENR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b000),
	  trap_raz_wi },
	/* PMOVSSET_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b011),
	  trap_raz_wi },

	/* TPIDR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b010),
	  NULL, reset_unknown, TPIDR_EL0 },
	/* TPIDRRO_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b011),
	  NULL, reset_unknown, TPIDRRO_EL0 },

	/* DACR32_EL2 */
	{ Op0(0b11), Op1(0b100), CRn(0b0011), CRm(0b0000), Op2(0b000),
	  NULL, reset_unknown, DACR32_EL2 },
	/* IFSR32_EL2 */
	{ Op0(0b11), Op1(0b100), CRn(0b0101), CRm(0b0000), Op2(0b001),
	  NULL, reset_unknown, IFSR32_EL2 },
	/* FPEXC32_EL2 */
	{ Op0(0b11), Op1(0b100), CRn(0b0101), CRm(0b0011), Op2(0b000),
	  NULL, reset_val, FPEXC32_EL2, 0x70 },
};

/* Trapped cp14 registers */
static const struct sys_reg_desc cp14_regs[] = {
};

/* Trapped cp14 64bit registers */
static const struct sys_reg_desc cp14_64_regs[] = {
};

/*
 * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
 * depending on the way they are accessed (as a 32bit or a 64bit
 * register).
 */
static const struct sys_reg_desc cp15_regs[] = {
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_sctlr, NULL, c1_SCTLR },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR },
	{ Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, c3_DACR },
	{ Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, c5_DFSR },
	{ Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, c5_IFSR },
	{ Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, c5_ADFSR },
	{ Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, c5_AIFSR },
	{ Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, c6_DFAR },
	{ Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, c6_IFAR },

	/*
	 * DC{C,I,CI}SW operations:
	 */
	{ Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
	{ Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
	{ Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },

	/* PMU */
	{ Op1( 0), CRn( 9), CRm(12), Op2( 0), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 1), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 2), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 3), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 5), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 6), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 7), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 0), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 1), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 2), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 0), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 1), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 2), trap_raz_wi },

	{ Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, c10_PRRR },
	{ Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR },
	{ Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, c10_AMAIR0 },
	{ Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 },
	{ Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },
};

static const struct sys_reg_desc cp15_64_regs[] = {
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
	{ Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 },
};

/* Target specific emulation tables */
static struct kvm_sys_reg_target_table *target_tables[KVM_ARM_NUM_TARGETS];

void kvm_register_target_sys_reg_table(unsigned int target,
				       struct kvm_sys_reg_target_table *table)
{
	target_tables[target] = table;
}

/* Get specific register table for this target. */
static const struct sys_reg_desc *get_target_table(unsigned target,
						   bool mode_is_64,
						   size_t *num)
{
	struct kvm_sys_reg_target_table *table;

	table = target_tables[target];
	if (mode_is_64) {
		*num = table->table64.num;
		return table->table64.table;
	} else {
		*num = table->table32.num;
		return table->table32.table;
	}
}

static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params,
					   const struct sys_reg_desc table[],
					   unsigned int num)
{
	unsigned int i;

	for (i = 0; i < num; i++) {
		const struct sys_reg_desc *r = &table[i];

		if (params->Op0 != r->Op0)
			continue;
		if (params->Op1 != r->Op1)
			continue;
		if (params->CRn != r->CRn)
			continue;
		if (params->CRm != r->CRm)
			continue;
		if (params->Op2 != r->Op2)
			continue;

		return r;
	}
	return NULL;
}

int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

/*
 * emulate_cp -- tries to match a sys_reg access in a handling table, and
 *               call the corresponding trap handler.
 *
 * @params: pointer to the descriptor of the access
 * @table: array of trap descriptors
 * @num: size of the trap descriptor array
 *
 * Return 0 if the access has been handled, and -1 if not.
 */
static int emulate_cp(struct kvm_vcpu *vcpu,
		      const struct sys_reg_params *params,
		      const struct sys_reg_desc *table,
		      size_t num)
{
	const struct sys_reg_desc *r;

	if (!table)
		return -1;	/* Not handled */

	r = find_reg(params, table, num);

	if (r) {
		/*
		 * Not having an accessor means that we have
		 * configured a trap that we don't know how to
		 * handle. This certainly qualifies as a gross bug
		 * that should be fixed right away.
		 */
		BUG_ON(!r->access);

		if (likely(r->access(vcpu, params, r))) {
			/* Skip instruction, since it was emulated */
			kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
		}

		/* Handled */
		return 0;
	}

	/* Not handled */
	return -1;
}

static void unhandled_cp_access(struct kvm_vcpu *vcpu,
				struct sys_reg_params *params)
{
	u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
	int cp;

	switch (hsr_ec) {
	case ESR_EL2_EC_CP15_32:
	case ESR_EL2_EC_CP15_64:
		cp = 15;
		break;
	case ESR_EL2_EC_CP14_MR:
	case ESR_EL2_EC_CP14_64:
		cp = 14;
		break;
	default:
		WARN_ON((cp = -1));
	}

	kvm_err("Unsupported guest CP%d access at: %08lx\n",
		cp, *vcpu_pc(vcpu));
	print_sys_reg_instr(params);
	kvm_inject_undefined(vcpu);
}

/**
 * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP14/CP15 access
 * @vcpu: The VCPU pointer
 * @global: The global (non target-specific) trap table
 * @nr_global: The number of entries in @global
 * @target_specific: The target-specific trap table
 * @nr_specific: The number of entries in @target_specific
 */
static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
			    const struct sys_reg_desc *global,
			    size_t nr_global,
			    const struct sys_reg_desc *target_specific,
			    size_t nr_specific)
{
	struct sys_reg_params params;
	u32 hsr = kvm_vcpu_get_hsr(vcpu);
	int Rt2 = (hsr >> 10) & 0xf;

	params.is_aarch32 = true;
	params.is_32bit = false;
	params.CRm = (hsr >> 1) & 0xf;
	params.Rt = (hsr >> 5) & 0xf;
	params.is_write = ((hsr & 1) == 0);

	params.Op0 = 0;
	params.Op1 = (hsr >> 16) & 0xf;
	params.Op2 = 0;
	params.CRn = 0;

	/*
	 * Massive hack here. Store Rt2 in the top 32bits so we only
	 * have one register to deal with. As we use the same trap
	 * backends between AArch32 and AArch64, we get away with it.
	 */
	if (params.is_write) {
		u64 val = *vcpu_reg(vcpu, params.Rt);
		val &= 0xffffffff;
		val |= *vcpu_reg(vcpu, Rt2) << 32;
		*vcpu_reg(vcpu, params.Rt) = val;
	}

	if (!emulate_cp(vcpu, &params, target_specific, nr_specific))
		goto out;
	if (!emulate_cp(vcpu, &params, global, nr_global))
		goto out;

	unhandled_cp_access(vcpu, &params);

out:
	/* Do the opposite hack for the read side */
	if (!params.is_write) {
		u64 val = *vcpu_reg(vcpu, params.Rt);
		val >>= 32;
		*vcpu_reg(vcpu, Rt2) = val;
	}

	return 1;
}
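
/*
 * Worked example (illustrative): a guest "mcrr p15, 0, r2, r3, c2"
 * (a 64bit TTBR0 write, matched by cp15_64_regs) has Rt = 2 holding
 * the low word and Rt2 = 3 the high word. The hack above folds them:
 *
 *	*vcpu_reg(vcpu, 2) = ((u64)r3 << 32) | (r2 & 0xffffffff);
 *
 * so access_vm_reg() sees one 64bit value in params.Rt, exactly as it
 * would for an AArch64 MSR. Reads are unfolded symmetrically at "out:".
 */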

/**
 * kvm_handle_cp_32 -- handles a mrc/mcr trap on a guest CP14/CP15 access
 * @vcpu: The VCPU pointer
 * @global: The global (non target-specific) trap table
 * @nr_global: The number of entries in @global
 * @target_specific: The target-specific trap table
 * @nr_specific: The number of entries in @target_specific
 */
static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
			    const struct sys_reg_desc *global,
			    size_t nr_global,
			    const struct sys_reg_desc *target_specific,
			    size_t nr_specific)
{
	struct sys_reg_params params;
	u32 hsr = kvm_vcpu_get_hsr(vcpu);

	params.is_aarch32 = true;
	params.is_32bit = true;
	params.CRm = (hsr >> 1) & 0xf;
	params.Rt = (hsr >> 5) & 0xf;
	params.is_write = ((hsr & 1) == 0);
	params.CRn = (hsr >> 10) & 0xf;
	params.Op0 = 0;
	params.Op1 = (hsr >> 14) & 0x7;
	params.Op2 = (hsr >> 17) & 0x7;

	if (!emulate_cp(vcpu, &params, target_specific, nr_specific))
		return 1;
	if (!emulate_cp(vcpu, &params, global, nr_global))
		return 1;

	unhandled_cp_access(vcpu, &params);
	return 1;
}

int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	const struct sys_reg_desc *target_specific;
	size_t num;

	target_specific = get_target_table(vcpu->arch.target, false, &num);
	return kvm_handle_cp_64(vcpu,
				cp15_64_regs, ARRAY_SIZE(cp15_64_regs),
				target_specific, num);
}

int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	const struct sys_reg_desc *target_specific;
	size_t num;

	target_specific = get_target_table(vcpu->arch.target, false, &num);
	return kvm_handle_cp_32(vcpu,
				cp15_regs, ARRAY_SIZE(cp15_regs),
				target_specific, num);
}

int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	return kvm_handle_cp_64(vcpu,
				cp14_64_regs, ARRAY_SIZE(cp14_64_regs),
				NULL, 0);
}

int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	return kvm_handle_cp_32(vcpu,
				cp14_regs, ARRAY_SIZE(cp14_regs),
				NULL, 0);
}

static int emulate_sys_reg(struct kvm_vcpu *vcpu,
			   const struct sys_reg_params *params)
{
	size_t num;
	const struct sys_reg_desc *table, *r;

	table = get_target_table(vcpu->arch.target, true, &num);

	/* Search target-specific then generic table. */
	r = find_reg(params, table, num);
	if (!r)
		r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	if (likely(r)) {
		/*
		 * Not having an accessor means that we have
		 * configured a trap that we don't know how to
		 * handle. This certainly qualifies as a gross bug
		 * that should be fixed right away.
		 */
		BUG_ON(!r->access);

		if (likely(r->access(vcpu, params, r))) {
			/* Skip instruction, since it was emulated */
			kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
			return 1;
		}
		/* If access function fails, it should complain. */
	} else {
		kvm_err("Unsupported guest sys_reg access at: %lx\n",
			*vcpu_pc(vcpu));
		print_sys_reg_instr(params);
	}
	kvm_inject_undefined(vcpu);
	return 1;
}

static void reset_sys_reg_descs(struct kvm_vcpu *vcpu,
				const struct sys_reg_desc *table, size_t num)
{
	unsigned long i;

	for (i = 0; i < num; i++)
		if (table[i].reset)
			table[i].reset(vcpu, &table[i]);
}

/**
 * kvm_handle_sys_reg -- handles a mrs/msr trap on a guest sys_reg access
 * @vcpu: The VCPU pointer
 * @run: The kvm_run struct
 */
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct sys_reg_params params;
	unsigned long esr = kvm_vcpu_get_hsr(vcpu);

	params.is_aarch32 = false;
	params.is_32bit = false;
	params.Op0 = (esr >> 20) & 3;
	params.Op1 = (esr >> 14) & 0x7;
	params.CRn = (esr >> 10) & 0xf;
	params.CRm = (esr >> 1) & 0xf;
	params.Op2 = (esr >> 17) & 0x7;
	params.Rt = (esr >> 5) & 0x1f;
	params.is_write = !(esr & 1);

	return emulate_sys_reg(vcpu, &params);
}

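/*
 * Worked example (illustrative): a guest "mrs x2, sctlr_el1" traps
 * with an ESR_EL2 ISS that the shifts above decode to
 *
 *	Op0 = 3, Op1 = 0, CRn = 1, CRm = 0, Op2 = 0	// SCTLR_EL1
 *	Rt = 2, is_write = false
 *
 * which find_reg() then matches against the sys_reg_descs entry wired
 * to access_sctlr().
 */
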
/******************************************************************************
 * Userspace API
 *****************************************************************************/

static bool index_to_params(u64 id, struct sys_reg_params *params)
{
	switch (id & KVM_REG_SIZE_MASK) {
	case KVM_REG_SIZE_U64:
		/* Any unused index bits means it's not valid. */
		if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
			   | KVM_REG_ARM_COPROC_MASK
			   | KVM_REG_ARM64_SYSREG_OP0_MASK
			   | KVM_REG_ARM64_SYSREG_OP1_MASK
			   | KVM_REG_ARM64_SYSREG_CRN_MASK
			   | KVM_REG_ARM64_SYSREG_CRM_MASK
			   | KVM_REG_ARM64_SYSREG_OP2_MASK))
			return false;
		params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP0_SHIFT);
		params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP1_SHIFT);
		params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK)
			       >> KVM_REG_ARM64_SYSREG_CRN_SHIFT);
		params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK)
			       >> KVM_REG_ARM64_SYSREG_CRM_SHIFT);
		params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP2_SHIFT);
		return true;
	default:
		return false;
	}
}

/* Decode an index value, and find the sys_reg_desc entry. */
static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
							u64 id)
{
	size_t num;
	const struct sys_reg_desc *table, *r;
	struct sys_reg_params params;

	/* We only do sys_reg for now. */
	if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
		return NULL;

	if (!index_to_params(id, &params))
		return NULL;

	table = get_target_table(vcpu->arch.target, true, &num);
	r = find_reg(&params, table, num);
	if (!r)
		r = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	/* Not saved in the sys_reg array? */
	if (r && !r->reg)
		r = NULL;

	return r;
}

/*
 * These are the invariant sys_reg registers: we let the guest see the
 * host versions of these, so they're part of the guest state.
 *
 * A future CPU may provide a mechanism to present different values to
 * the guest, or a future kvm may trap them.
 */

#define FUNCTION_INVARIANT(reg)						\
	static void get_##reg(struct kvm_vcpu *v,			\
			      const struct sys_reg_desc *r)		\
	{								\
		u64 val;						\
									\
		asm volatile("mrs %0, " __stringify(reg) "\n"		\
			     : "=r" (val));				\
		((struct sys_reg_desc *)r)->val = val;			\
	}

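/*
 * For illustration, FUNCTION_INVARIANT(midr_el1) expands to a
 * get_midr_el1() helper that executes "mrs %0, midr_el1" on the host
 * and stashes the result in the descriptor's ->val field; that value
 * is what get_invariant_sys_reg() later hands back to userspace.
 */
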
FUNCTION_INVARIANT(midr_el1)
FUNCTION_INVARIANT(ctr_el0)
FUNCTION_INVARIANT(revidr_el1)
FUNCTION_INVARIANT(id_pfr0_el1)
FUNCTION_INVARIANT(id_pfr1_el1)
FUNCTION_INVARIANT(id_dfr0_el1)
FUNCTION_INVARIANT(id_afr0_el1)
FUNCTION_INVARIANT(id_mmfr0_el1)
FUNCTION_INVARIANT(id_mmfr1_el1)
FUNCTION_INVARIANT(id_mmfr2_el1)
FUNCTION_INVARIANT(id_mmfr3_el1)
FUNCTION_INVARIANT(id_isar0_el1)
FUNCTION_INVARIANT(id_isar1_el1)
FUNCTION_INVARIANT(id_isar2_el1)
FUNCTION_INVARIANT(id_isar3_el1)
FUNCTION_INVARIANT(id_isar4_el1)
FUNCTION_INVARIANT(id_isar5_el1)
FUNCTION_INVARIANT(clidr_el1)
FUNCTION_INVARIANT(aidr_el1)

/* ->val is filled in by kvm_sys_reg_table_init() */
static struct sys_reg_desc invariant_sys_regs[] = {
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b000),
	  NULL, get_midr_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b110),
	  NULL, get_revidr_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b000),
	  NULL, get_id_pfr0_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b001),
	  NULL, get_id_pfr1_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b010),
	  NULL, get_id_dfr0_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b011),
	  NULL, get_id_afr0_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b100),
	  NULL, get_id_mmfr0_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b101),
	  NULL, get_id_mmfr1_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b110),
	  NULL, get_id_mmfr2_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b111),
	  NULL, get_id_mmfr3_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b000),
	  NULL, get_id_isar0_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b001),
	  NULL, get_id_isar1_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b010),
	  NULL, get_id_isar2_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b011),
	  NULL, get_id_isar3_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b100),
	  NULL, get_id_isar4_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b101),
	  NULL, get_id_isar5_el1 },
	{ Op0(0b11), Op1(0b001), CRn(0b0000), CRm(0b0000), Op2(0b001),
	  NULL, get_clidr_el1 },
	{ Op0(0b11), Op1(0b001), CRn(0b0000), CRm(0b0000), Op2(0b111),
	  NULL, get_aidr_el1 },
	{ Op0(0b11), Op1(0b011), CRn(0b0000), CRm(0b0000), Op2(0b001),
	  NULL, get_ctr_el0 },
};

static int reg_from_user(u64 *val, const void __user *uaddr, u64 id)
{
	if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
		return -EFAULT;
	return 0;
}

static int reg_to_user(void __user *uaddr, const u64 *val, u64 id)
{
	if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_invariant_sys_reg(u64 id, void __user *uaddr)
{
	struct sys_reg_params params;
	const struct sys_reg_desc *r;

	if (!index_to_params(id, &params))
		return -ENOENT;

	r = find_reg(&params, invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs));
	if (!r)
		return -ENOENT;

	return reg_to_user(uaddr, &r->val, id);
}

static int set_invariant_sys_reg(u64 id, void __user *uaddr)
{
	struct sys_reg_params params;
	const struct sys_reg_desc *r;
	int err;
	u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */

	if (!index_to_params(id, &params))
		return -ENOENT;
	r = find_reg(&params, invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs));
	if (!r)
		return -ENOENT;

	err = reg_from_user(&val, uaddr, id);
	if (err)
		return err;

	/* This is what we mean by invariant: you can't change it. */
	if (r->val != val)
		return -EINVAL;

	return 0;
}

static bool is_valid_cache(u32 val)
{
	u32 level, ctype;

	/* This is a bool function: an out-of-range CSSELR is simply invalid. */
	if (val >= CSSELR_MAX)
		return false;

	/* Bottom bit is Instruction or Data bit.  Next 3 bits are level. */
	level = (val >> 1);
	ctype = (cache_levels >> (level * 3)) & 7;

	switch (ctype) {
	case 0: /* No cache */
		return false;
	case 1: /* Instruction cache only */
		return (val & 1);
	case 2: /* Data cache only */
	case 4: /* Unified cache */
		return !(val & 1);
	case 3: /* Separate instruction and data caches */
		return true;
	default: /* Reserved: we can't know instruction or data. */
		return false;
	}
}

static int demux_c15_get(u64 id, void __user *uaddr)
{
	u32 val;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (!is_valid_cache(val))
			return -ENOENT;

		return put_user(get_ccsidr(val), uval);
	default:
		return -ENOENT;
	}
}

static int demux_c15_set(u64 id, void __user *uaddr)
{
	u32 val, newval;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (!is_valid_cache(val))
			return -ENOENT;

		if (get_user(newval, uval))
			return -EFAULT;

		/* This is also invariant: you can't change it. */
		if (newval != get_ccsidr(val))
			return -EINVAL;
		return 0;
	default:
		return -ENOENT;
	}
}

int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	const struct sys_reg_desc *r;
	void __user *uaddr = (void __user *)(unsigned long)reg->addr;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_get(reg->id, uaddr);

	if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
		return -ENOENT;

	r = index_to_sys_reg_desc(vcpu, reg->id);
	if (!r)
		return get_invariant_sys_reg(reg->id, uaddr);

	return reg_to_user(uaddr, &vcpu_sys_reg(vcpu, r->reg), reg->id);
}

int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	const struct sys_reg_desc *r;
	void __user *uaddr = (void __user *)(unsigned long)reg->addr;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_set(reg->id, uaddr);

	if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
		return -ENOENT;

	r = index_to_sys_reg_desc(vcpu, reg->id);
	if (!r)
		return set_invariant_sys_reg(reg->id, uaddr);

	return reg_from_user(&vcpu_sys_reg(vcpu, r->reg), uaddr, reg->id);
}

static unsigned int num_demux_regs(void)
{
	unsigned int i, count = 0;

	for (i = 0; i < CSSELR_MAX; i++)
		if (is_valid_cache(i))
			count++;

	return count;
}

static int write_demux_regids(u64 __user *uindices)
{
	u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
	unsigned int i;

	val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
	for (i = 0; i < CSSELR_MAX; i++) {
		if (!is_valid_cache(i))
			continue;
		if (put_user(val | i, uindices))
			return -EFAULT;
		uindices++;
	}
	return 0;
}

static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
{
	return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
		KVM_REG_ARM64_SYSREG |
		(reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
		(reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
		(reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
		(reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
		(reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT));
}

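/*
 * For illustration, the MPIDR_EL1 descriptor above (Op0=3, Op1=0,
 * CRn=0, CRm=0, Op2=5) maps to the index
 *
 *	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM64_SYSREG
 *		| (3 << KVM_REG_ARM64_SYSREG_OP0_SHIFT)
 *		| (5 << KVM_REG_ARM64_SYSREG_OP2_SHIFT)
 *
 * which is what userspace passes in kvm_one_reg.id for
 * KVM_GET_ONE_REG/KVM_SET_ONE_REG, and what index_to_params() undoes.
 */
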
static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
{
	if (!*uind)
		return true;

	if (put_user(sys_reg_to_index(reg), *uind))
		return false;

	(*uind)++;
	return true;
}

/* Assumed ordered tables, see kvm_sys_reg_table_init. */
static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
{
	const struct sys_reg_desc *i1, *i2, *end1, *end2;
	unsigned int total = 0;
	size_t num;

	/* We check for duplicates here, to allow arch-specific overrides. */
	i1 = get_target_table(vcpu->arch.target, true, &num);
	end1 = i1 + num;
	i2 = sys_reg_descs;
	end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs);

	BUG_ON(i1 == end1 || i2 == end2);

	/* Walk carefully, as both tables may refer to the same register. */
	while (i1 || i2) {
		int cmp = cmp_sys_reg(i1, i2);
		/* target-specific overrides generic entry. */
		if (cmp <= 0) {
			/* Ignore registers we trap but don't save. */
			if (i1->reg) {
				if (!copy_reg_to_user(i1, &uind))
					return -EFAULT;
				total++;
			}
		} else {
			/* Ignore registers we trap but don't save. */
			if (i2->reg) {
				if (!copy_reg_to_user(i2, &uind))
					return -EFAULT;
				total++;
			}
		}

		if (cmp <= 0 && ++i1 == end1)
			i1 = NULL;
		if (cmp >= 0 && ++i2 == end2)
			i2 = NULL;
	}
	return total;
}

unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
{
	return ARRAY_SIZE(invariant_sys_regs)
		+ num_demux_regs()
		+ walk_sys_regs(vcpu, (u64 __user *)NULL);
}

int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	unsigned int i;
	int err;

	/* Then give them all the invariant registers' indices. */
	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) {
		if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices))
			return -EFAULT;
		uindices++;
	}

	err = walk_sys_regs(vcpu, uindices);
	if (err < 0)
		return err;
	uindices += err;

	return write_demux_regids(uindices);
}

static int check_sysreg_table(const struct sys_reg_desc *table, unsigned int n)
{
	unsigned int i;

	for (i = 1; i < n; i++) {
		if (cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
			kvm_err("sys_reg table %p out of order (%d)\n", table, i - 1);
			return 1;
		}
	}

	return 0;
}

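/*
 * A minimal sketch of the comparison this relies on (the real
 * cmp_sys_reg() is expected to live in sys_regs.h): descriptors order
 * lexicographically on (Op0, Op1, CRn, CRm, Op2), e.g.
 *
 *	static int cmp_sys_reg(const struct sys_reg_desc *i1,
 *			       const struct sys_reg_desc *i2)
 *	{
 *		if (i1->Op0 != i2->Op0)
 *			return i1->Op0 - i2->Op0;
 *		if (i1->Op1 != i2->Op1)
 *			return i1->Op1 - i2->Op1;
 *		if (i1->CRn != i2->CRn)
 *			return i1->CRn - i2->CRn;
 *		if (i1->CRm != i2->CRm)
 *			return i1->CRm - i2->CRm;
 *		return i1->Op2 - i2->Op2;
 *	}
 *
 * (walk_sys_regs() additionally relies on a NULL iterator comparing
 * greater, so the real helper must handle that case too.)
 */
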
void kvm_sys_reg_table_init(void)
{
	unsigned int i;
	struct sys_reg_desc clidr;

	/* Make sure tables are unique and in order. */
	BUG_ON(check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs)));
	BUG_ON(check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs)));
	BUG_ON(check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs)));
	BUG_ON(check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs)));
	BUG_ON(check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs)));
	BUG_ON(check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs)));

	/* We abuse the reset function to overwrite the table itself. */
	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
		invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]);

	/*
	 * CLIDR format is awkward, so clean it up. See ARM B4.1.20:
	 *
	 *   If software reads the Cache Type fields from Ctype1
	 *   upwards, once it has seen a value of 0b000, no caches
	 *   exist at further-out levels of the hierarchy. So, for
	 *   example, if Ctype3 is the first Cache Type field with a
	 *   value of 0b000, the values of Ctype4 to Ctype7 must be
	 *   ignored.
	 */
	get_clidr_el1(NULL, &clidr); /* Ugly... */
	cache_levels = clidr.val;
	for (i = 0; i < 7; i++)
		if (((cache_levels >> (i*3)) & 7) == 0)
			break;
	/* Clear all higher bits. */
	cache_levels &= (1 << (i*3))-1;
}

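/*
 * Worked example (illustrative): with separate L1 I/D caches
 * (Ctype1 = 0b011) and a unified L2 (Ctype2 = 0b100), this yields
 * cache_levels = 0b100011. The loop stops at i = 2 (Ctype3 == 0), the
 * mask keeps bits [5:0], and is_valid_cache() then accepts only the
 * CSSELR values 0 (L1 data), 1 (L1 instruction) and 2 (L2 unified).
 */
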
/**
 * kvm_reset_sys_regs - sets system registers to reset value
 * @vcpu: The VCPU pointer
 *
 * This function finds the right table above and sets the registers on the
 * virtual CPU struct to their architecturally defined reset values.
 */
void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
{
	size_t num;
	const struct sys_reg_desc *table;

	/* Catch someone adding a register without putting in reset entry. */
	memset(&vcpu->arch.ctxt.sys_regs, 0x42, sizeof(vcpu->arch.ctxt.sys_regs));

	/* Generic chip reset first (so target could override). */
	reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	table = get_target_table(vcpu->arch.target, true, &num);
	reset_sys_reg_descs(vcpu, table, num);

	for (num = 1; num < NR_SYS_REGS; num++)
		if (vcpu_sys_reg(vcpu, num) == 0x4242424242424242)
			panic("Didn't reset vcpu_sys_reg(%zi)", num);
}