/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Authors: Rusty Russell <rusty@rustcorp.com.au>
 *          Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */
#include <linux/bsearch.h>
#include <linux/mm.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_mmu.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <trace/events/kvm.h>
#include <asm/vfp.h>
#include "../vfp/vfpinstr.h"

#include "trace.h"
#include "coproc.h"
/******************************************************************************
 * Co-processor emulation
 *****************************************************************************/
static bool write_to_read_only(struct kvm_vcpu *vcpu,
			       const struct coproc_params *params)
{
	WARN_ONCE(1, "CP15 write to read-only register\n");
	print_cp_instr(params);
	kvm_inject_undefined(vcpu);
	return false;
}

static bool read_from_write_only(struct kvm_vcpu *vcpu,
				 const struct coproc_params *params)
{
	WARN_ONCE(1, "CP15 read to write-only register\n");
	print_cp_instr(params);
	kvm_inject_undefined(vcpu);
	return false;
}
/* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
static u32 cache_levels;

/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
#define CSSELR_MAX 12
/*
 * kvm_vcpu_arch.cp15 holds cp15 registers as an array of u32, but some
 * of the cp15 registers can be viewed either as a pair of two u32
 * registers or as one u64 register. The current u64 register encoding
 * is that the least significant u32 word is followed by the most
 * significant u32 word.
 */
static inline void vcpu_cp15_reg64_set(struct kvm_vcpu *vcpu,
				       const struct coproc_reg *r,
				       u64 val)
{
	vcpu_cp15(vcpu, r->reg) = val & 0xffffffff;
	vcpu_cp15(vcpu, r->reg + 1) = val >> 32;
}

static inline u64 vcpu_cp15_reg64_get(struct kvm_vcpu *vcpu,
				      const struct coproc_reg *r)
{
	u64 val;

	val = vcpu_cp15(vcpu, r->reg + 1);
	val = val << 32;
	val = val | vcpu_cp15(vcpu, r->reg);
	return val;
}
int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	/*
	 * We can get here, if the host has been built without VFPv3 support,
	 * but the guest attempted a floating point operation.
	 */
	kvm_inject_undefined(vcpu);
	return 1;
}

int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	return 1;
}
static void reset_mpidr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
{
	/*
	 * Compute guest MPIDR. We build a virtual cluster out of the
	 * vcpu_id, but we read the 'U' bit from the underlying
	 * hardware directly.
	 */
	vcpu_cp15(vcpu, c0_MPIDR) = ((read_cpuid_mpidr() & MPIDR_SMP_BITMASK) |
				     ((vcpu->vcpu_id >> 2) << MPIDR_LEVEL_BITS) |
				     (vcpu->vcpu_id & 3));
}
/* TRM entries A7:4.3.31 A15:4.3.28 - RO WI */
static bool access_actlr(struct kvm_vcpu *vcpu,
			 const struct coproc_params *p,
			 const struct coproc_reg *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);

	*vcpu_reg(vcpu, p->Rt1) = vcpu_cp15(vcpu, c1_ACTLR);
	return true;
}
/* TRM entries A7:4.3.56, A15:4.3.60 - R/O. */
static bool access_cbar(struct kvm_vcpu *vcpu,
			const struct coproc_params *p,
			const struct coproc_reg *r)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p);
	return read_zero(vcpu, p);
}
/* TRM entries A7:4.3.49, A15:4.3.48 - R/O WI */
static bool access_l2ctlr(struct kvm_vcpu *vcpu,
			  const struct coproc_params *p,
			  const struct coproc_reg *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);

	*vcpu_reg(vcpu, p->Rt1) = vcpu_cp15(vcpu, c9_L2CTLR);
	return true;
}
static void reset_l2ctlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
{
	u32 l2ctlr, ncores;

	asm volatile("mrc p15, 1, %0, c9, c0, 2\n" : "=r" (l2ctlr));
	l2ctlr &= ~(3 << 24);
	ncores = atomic_read(&vcpu->kvm->online_vcpus) - 1;
	/* How many cores in the current cluster and the next ones */
	ncores -= (vcpu->vcpu_id & ~3);
	/* Cap it to the maximum number of cores in a single cluster */
	ncores = min(ncores, 3U);
	l2ctlr |= (ncores & 3) << 24;

	vcpu_cp15(vcpu, c9_L2CTLR) = l2ctlr;
}
static void reset_actlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
{
	u32 actlr;

	/* ACTLR contains SMP bit: make sure you create all cpus first! */
	asm volatile("mrc p15, 0, %0, c1, c0, 1\n" : "=r" (actlr));

	/* Make the SMP bit consistent with the guest configuration */
	if (atomic_read(&vcpu->kvm->online_vcpus) > 1)
		/* SMP: Multiprocessing extensions. */
		actlr |= 1U << 6;
	else /* AMP: Asymmetric multiprocessing. */
		actlr &= ~(1U << 6);

	vcpu_cp15(vcpu, c1_ACTLR) = actlr;
}
/*
 * TRM entries: A7:4.3.50, A15:4.3.49
 * R/O WI (even if NSACR.NS_L2ERR, a write of 1 is ignored).
 */
static bool access_l2ectlr(struct kvm_vcpu *vcpu,
			   const struct coproc_params *p,
			   const struct coproc_reg *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);

	*vcpu_reg(vcpu, p->Rt1) = 0;
	return true;
}
/*
 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
 */
static bool access_dcsw(struct kvm_vcpu *vcpu,
			const struct coproc_params *p,
			const struct coproc_reg *r)
{
	if (!p->is_write)
		return read_from_write_only(vcpu, p);

	kvm_set_way_flush(vcpu);
	return true;
}
/*
 * Generic accessor for VM registers. Only called as long as HCR_TVM
 * is set.  If the guest enables the MMU, we stop trapping the VM
 * sys_regs and leave it in complete control of the caches.
 *
 * Used by the cpu-specific code.
 */
bool access_vm_reg(struct kvm_vcpu *vcpu,
		   const struct coproc_params *p,
		   const struct coproc_reg *r)
{
	bool was_enabled = vcpu_has_cache_enabled(vcpu);

	BUG_ON(!p->is_write);

	vcpu_cp15(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt1);
	if (p->is_64bit)
		vcpu_cp15(vcpu, r->reg + 1) = *vcpu_reg(vcpu, p->Rt2);

	kvm_toggle_cache(vcpu, was_enabled);
	return true;
}
static bool access_gic_sgi(struct kvm_vcpu *vcpu,
			   const struct coproc_params *p,
			   const struct coproc_reg *r)
{
	u64 reg;

	if (!p->is_write)
		return read_from_write_only(vcpu, p);

	reg = (u64)*vcpu_reg(vcpu, p->Rt2) << 32;
	reg |= *vcpu_reg(vcpu, p->Rt1);

	vgic_v3_dispatch_sgi(vcpu, reg);

	return true;
}
static bool access_gic_sre(struct kvm_vcpu *vcpu,
			   const struct coproc_params *p,
			   const struct coproc_reg *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);

	*vcpu_reg(vcpu, p->Rt1) = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;

	return true;
}
/*
 * We could trap ID_DFR0 and tell the guest we don't support performance
 * monitoring.  Unfortunately the patch to make the kernel check ID_DFR0 was
 * NAKed, so it will read the PMCR anyway.
 *
 * Therefore we tell the guest we have 0 counters.  Unfortunately, we
 * must always support PMCCNTR (the cycle counter): we just RAZ/WI for
 * all PM registers, which doesn't crash the guest kernel at least.
 */
static bool pm_fake(struct kvm_vcpu *vcpu,
		    const struct coproc_params *p,
		    const struct coproc_reg *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);
	else
		return read_zero(vcpu, p);
}
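
/*
 * Net effect for the guest (illustrative): a PMCR read such as
 * "mrc p15, 0, r0, c9, c12, 0" traps here and returns 0 via read_zero(),
 * while any write to the PM registers is silently discarded by
 * ignore_write(), i.e. the whole PMU range behaves as RAZ/WI.
 */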
#define access_pmcr pm_fake
#define access_pmcntenset pm_fake
#define access_pmcntenclr pm_fake
#define access_pmovsr pm_fake
#define access_pmselr pm_fake
#define access_pmceid0 pm_fake
#define access_pmceid1 pm_fake
#define access_pmccntr pm_fake
#define access_pmxevtyper pm_fake
#define access_pmxevcntr pm_fake
#define access_pmuserenr pm_fake
#define access_pmintenset pm_fake
#define access_pmintenclr pm_fake
/* Architected CP15 registers.
 * CRn denotes the primary register number, but is copied to the CRm in the
 * user space API for 64-bit register access in line with the terminology used
 * in the ARM ARM.
 * Important: Must be sorted ascending by CRn, CRm, Op1, Op2 and with 64-bit
 *            registers preceding 32-bit ones.
 */
static const struct coproc_reg cp15_regs[] = {
	/* MPIDR: we use VMPIDR for guest access. */
	{ CRn( 0), CRm( 0), Op1( 0), Op2( 5), is32,
			NULL, reset_mpidr, c0_MPIDR },

	/* CSSELR: swapped by interrupt.S. */
	{ CRn( 0), CRm( 0), Op1( 2), Op2( 0), is32,
			NULL, reset_unknown, c0_CSSELR },

	/* ACTLR: trapped by HCR.TAC bit. */
	{ CRn( 1), CRm( 0), Op1( 0), Op2( 1), is32,
			access_actlr, reset_actlr, c1_ACTLR },

	/* CPACR: swapped by interrupt.S. */
	{ CRn( 1), CRm( 0), Op1( 0), Op2( 2), is32,
			NULL, reset_val, c1_CPACR, 0x00000000 },

	/* TTBR0/TTBR1/TTBCR: swapped by interrupt.S. */
	{ CRm64( 2), Op1( 0), is64, access_vm_reg, reset_unknown64, c2_TTBR0 },
	{ CRn(2), CRm( 0), Op1( 0), Op2( 0), is32,
			access_vm_reg, reset_unknown, c2_TTBR0 },
	{ CRn(2), CRm( 0), Op1( 0), Op2( 1), is32,
			access_vm_reg, reset_unknown, c2_TTBR1 },
	{ CRn( 2), CRm( 0), Op1( 0), Op2( 2), is32,
			access_vm_reg, reset_val, c2_TTBCR, 0x00000000 },
	{ CRm64( 2), Op1( 1), is64, access_vm_reg, reset_unknown64, c2_TTBR1 },

	/* DACR: swapped by interrupt.S. */
	{ CRn( 3), CRm( 0), Op1( 0), Op2( 0), is32,
			access_vm_reg, reset_unknown, c3_DACR },

	/* DFSR/IFSR/ADFSR/AIFSR: swapped by interrupt.S. */
	{ CRn( 5), CRm( 0), Op1( 0), Op2( 0), is32,
			access_vm_reg, reset_unknown, c5_DFSR },
	{ CRn( 5), CRm( 0), Op1( 0), Op2( 1), is32,
			access_vm_reg, reset_unknown, c5_IFSR },
	{ CRn( 5), CRm( 1), Op1( 0), Op2( 0), is32,
			access_vm_reg, reset_unknown, c5_ADFSR },
	{ CRn( 5), CRm( 1), Op1( 0), Op2( 1), is32,
			access_vm_reg, reset_unknown, c5_AIFSR },

	/* DFAR/IFAR: swapped by interrupt.S. */
	{ CRn( 6), CRm( 0), Op1( 0), Op2( 0), is32,
			access_vm_reg, reset_unknown, c6_DFAR },
	{ CRn( 6), CRm( 0), Op1( 0), Op2( 2), is32,
			access_vm_reg, reset_unknown, c6_IFAR },

	/* PAR swapped by interrupt.S */
	{ CRm64( 7), Op1( 0), is64, NULL, reset_unknown64, c7_PAR },

	/*
	 * DC{C,I,CI}SW operations:
	 */
	{ CRn( 7), CRm( 6), Op1( 0), Op2( 2), is32, access_dcsw },
	{ CRn( 7), CRm(10), Op1( 0), Op2( 2), is32, access_dcsw },
	{ CRn( 7), CRm(14), Op1( 0), Op2( 2), is32, access_dcsw },
	/*
	 * L2CTLR access (guest wants to know #CPUs).
	 */
	{ CRn( 9), CRm( 0), Op1( 1), Op2( 2), is32,
			access_l2ctlr, reset_l2ctlr, c9_L2CTLR },
	{ CRn( 9), CRm( 0), Op1( 1), Op2( 3), is32, access_l2ectlr },

	/*
	 * Dummy performance monitor implementation.
	 */
	{ CRn( 9), CRm(12), Op1( 0), Op2( 0), is32, access_pmcr },
	{ CRn( 9), CRm(12), Op1( 0), Op2( 1), is32, access_pmcntenset },
	{ CRn( 9), CRm(12), Op1( 0), Op2( 2), is32, access_pmcntenclr },
	{ CRn( 9), CRm(12), Op1( 0), Op2( 3), is32, access_pmovsr },
	{ CRn( 9), CRm(12), Op1( 0), Op2( 5), is32, access_pmselr },
	{ CRn( 9), CRm(12), Op1( 0), Op2( 6), is32, access_pmceid0 },
	{ CRn( 9), CRm(12), Op1( 0), Op2( 7), is32, access_pmceid1 },
	{ CRn( 9), CRm(13), Op1( 0), Op2( 0), is32, access_pmccntr },
	{ CRn( 9), CRm(13), Op1( 0), Op2( 1), is32, access_pmxevtyper },
	{ CRn( 9), CRm(13), Op1( 0), Op2( 2), is32, access_pmxevcntr },
	{ CRn( 9), CRm(14), Op1( 0), Op2( 0), is32, access_pmuserenr },
	{ CRn( 9), CRm(14), Op1( 0), Op2( 1), is32, access_pmintenset },
	{ CRn( 9), CRm(14), Op1( 0), Op2( 2), is32, access_pmintenclr },

	/* PRRR/NMRR (aka MAIR0/MAIR1): swapped by interrupt.S. */
	{ CRn(10), CRm( 2), Op1( 0), Op2( 0), is32,
			access_vm_reg, reset_unknown, c10_PRRR },
	{ CRn(10), CRm( 2), Op1( 0), Op2( 1), is32,
			access_vm_reg, reset_unknown, c10_NMRR },

	/* AMAIR0/AMAIR1: swapped by interrupt.S. */
	{ CRn(10), CRm( 3), Op1( 0), Op2( 0), is32,
			access_vm_reg, reset_unknown, c10_AMAIR0 },
	{ CRn(10), CRm( 3), Op1( 0), Op2( 1), is32,
			access_vm_reg, reset_unknown, c10_AMAIR1 },

	/* ICC_SGI1R */
	{ CRm64(12), Op1( 0), is64, access_gic_sgi },

	/* VBAR: swapped by interrupt.S. */
	{ CRn(12), CRm( 0), Op1( 0), Op2( 0), is32,
			NULL, reset_val, c12_VBAR, 0x00000000 },

	/* ICC_SRE */
	{ CRn(12), CRm(12), Op1( 0), Op2(5), is32, access_gic_sre },

	/* CONTEXTIDR/TPIDRURW/TPIDRURO/TPIDRPRW: swapped by interrupt.S. */
	{ CRn(13), CRm( 0), Op1( 0), Op2( 1), is32,
			access_vm_reg, reset_val, c13_CID, 0x00000000 },
	{ CRn(13), CRm( 0), Op1( 0), Op2( 2), is32,
			NULL, reset_unknown, c13_TID_URW },
	{ CRn(13), CRm( 0), Op1( 0), Op2( 3), is32,
			NULL, reset_unknown, c13_TID_URO },
	{ CRn(13), CRm( 0), Op1( 0), Op2( 4), is32,
			NULL, reset_unknown, c13_TID_PRIV },

	/* CNTKCTL: swapped by interrupt.S. */
	{ CRn(14), CRm( 1), Op1( 0), Op2( 0), is32,
			NULL, reset_val, c14_CNTKCTL, 0x00000000 },

	/* The Configuration Base Address Register. */
	{ CRn(15), CRm( 0), Op1( 4), Op2( 0), is32, access_cbar },
};
static int check_reg_table(const struct coproc_reg *table, unsigned int n)
{
	unsigned int i;

	for (i = 1; i < n; i++) {
		if (cmp_reg(&table[i-1], &table[i]) >= 0) {
			kvm_err("reg table %p out of order (%d)\n", table, i - 1);
			return 1;
		}
	}

	return 0;
}
/* Target specific emulation tables */
static struct kvm_coproc_target_table *target_tables[KVM_ARM_NUM_TARGETS];

void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table)
{
	BUG_ON(check_reg_table(table->table, table->num));
	target_tables[table->target] = table;
}

/* Get specific register table for this target. */
static const struct coproc_reg *get_target_table(unsigned target, size_t *num)
{
	struct kvm_coproc_target_table *table;

	table = target_tables[target];
	*num = table->num;
	return table->table;
}
#define reg_to_match_value(x)						\
	({								\
		unsigned long val;					\
		val  = (x)->CRn << 11;					\
		val |= (x)->CRm << 7;					\
		val |= (x)->Op1 << 4;					\
		val |= (x)->Op2 << 1;					\
		val |= !(x)->is_64bit;					\
		val;							\
	 })
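
/*
 * Example (illustrative): for the 32-bit CSSELR entry in cp15_regs
 * (CRn == 0, CRm == 0, Op1 == 2, Op2 == 0, is_64bit == false), the
 * macro yields (2 << 4) | 1 == 0x21.  is_64bit is inverted so that
 * 64-bit registers sort before their 32-bit counterparts, matching
 * the ordering rule stated above cp15_regs.
 */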
static int match_reg(const void *key, const void *elt)
{
	const unsigned long pval = (unsigned long)key;
	const struct coproc_reg *r = elt;

	return pval - reg_to_match_value(r);
}

static const struct coproc_reg *find_reg(const struct coproc_params *params,
					 const struct coproc_reg table[],
					 unsigned int num)
{
	unsigned long pval = reg_to_match_value(params);

	return bsearch((void *)pval, table, num, sizeof(table[0]), match_reg);
}
static int emulate_cp15(struct kvm_vcpu *vcpu,
			const struct coproc_params *params)
{
	size_t num;
	const struct coproc_reg *table, *r;

	trace_kvm_emulate_cp15_imp(params->Op1, params->Rt1, params->CRn,
				   params->CRm, params->Op2, params->is_write);

	table = get_target_table(vcpu->arch.target, &num);

	/* Search target-specific then generic table. */
	r = find_reg(params, table, num);
	if (!r)
		r = find_reg(params, cp15_regs, ARRAY_SIZE(cp15_regs));

	if (likely(r)) {
		/* If we don't have an accessor, we should never get here! */
		BUG_ON(!r->access);

		if (likely(r->access(vcpu, params, r))) {
			/* Skip instruction, since it was emulated */
			kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
			return 1;
		}
		/* If access function fails, it should complain. */
	} else {
		kvm_err("Unsupported guest CP15 access at: %08lx\n",
			*vcpu_pc(vcpu));
		print_cp_instr(params);
	}
	kvm_inject_undefined(vcpu);
	return 1;
}
/**
 * kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct coproc_params params;

	params.CRn = (kvm_vcpu_get_hsr(vcpu) >> 1) & 0xf;
	params.Rt1 = (kvm_vcpu_get_hsr(vcpu) >> 5) & 0xf;
	params.is_write = ((kvm_vcpu_get_hsr(vcpu) & 1) == 0);
	params.is_64bit = true;

	params.Op1 = (kvm_vcpu_get_hsr(vcpu) >> 16) & 0xf;
	params.Op2 = 0;
	params.Rt2 = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf;
	params.CRm = 0;

	return emulate_cp15(vcpu, &params);
}
static void reset_coproc_regs(struct kvm_vcpu *vcpu,
			      const struct coproc_reg *table, size_t num)
{
	unsigned long i;

	for (i = 0; i < num; i++)
		if (table[i].reset)
			table[i].reset(vcpu, &table[i]);
}
/**
 * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct coproc_params params;

	params.CRm = (kvm_vcpu_get_hsr(vcpu) >> 1) & 0xf;
	params.Rt1 = (kvm_vcpu_get_hsr(vcpu) >> 5) & 0xf;
	params.is_write = ((kvm_vcpu_get_hsr(vcpu) & 1) == 0);
	params.is_64bit = false;

	params.CRn = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf;
	params.Op1 = (kvm_vcpu_get_hsr(vcpu) >> 14) & 0x7;
	params.Op2 = (kvm_vcpu_get_hsr(vcpu) >> 17) & 0x7;
	params.Rt2 = 0;

	return emulate_cp15(vcpu, &params);
}
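
/*
 * HSR layout assumed by the decoding above for 32-bit CP15 traps
 * (illustrative): bit 0 is the direction bit (0 == write), bits 4:1
 * hold CRm, bits 8:5 Rt1, bits 13:10 CRn, bits 16:14 Op1 and
 * bits 19:17 Op2 - exactly the shift/mask pairs extracted in
 * kvm_handle_cp15_32().
 */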
/******************************************************************************
 * Userspace API
 *****************************************************************************/
static bool index_to_params(u64 id, struct coproc_params *params)
{
	switch (id & KVM_REG_SIZE_MASK) {
	case KVM_REG_SIZE_U32:
		/* Any unused index bits means it's not valid. */
		if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
			   | KVM_REG_ARM_COPROC_MASK
			   | KVM_REG_ARM_32_CRN_MASK
			   | KVM_REG_ARM_CRM_MASK
			   | KVM_REG_ARM_OPC1_MASK
			   | KVM_REG_ARM_32_OPC2_MASK))
			return false;

		params->is_64bit = false;
		params->CRn = ((id & KVM_REG_ARM_32_CRN_MASK)
			       >> KVM_REG_ARM_32_CRN_SHIFT);
		params->CRm = ((id & KVM_REG_ARM_CRM_MASK)
			       >> KVM_REG_ARM_CRM_SHIFT);
		params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK)
			       >> KVM_REG_ARM_OPC1_SHIFT);
		params->Op2 = ((id & KVM_REG_ARM_32_OPC2_MASK)
			       >> KVM_REG_ARM_32_OPC2_SHIFT);
		return true;
	case KVM_REG_SIZE_U64:
		/* Any unused index bits means it's not valid. */
		if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
			   | KVM_REG_ARM_COPROC_MASK
			   | KVM_REG_ARM_CRM_MASK
			   | KVM_REG_ARM_OPC1_MASK))
			return false;
		params->is_64bit = true;
		/* CRm to CRn: see cp15_to_index for details */
		params->CRn = ((id & KVM_REG_ARM_CRM_MASK)
			       >> KVM_REG_ARM_CRM_SHIFT);
		params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK)
			       >> KVM_REG_ARM_OPC1_SHIFT);
		params->Op2 = 0;
		params->CRm = 0;
		return true;
	default:
		return false;
	}
}
/* Decode an index value, and find the cp15 coproc_reg entry. */
static const struct coproc_reg *index_to_coproc_reg(struct kvm_vcpu *vcpu,
						    u64 id)
{
	size_t num;
	const struct coproc_reg *table, *r;
	struct coproc_params params;

	/* We only do cp15 for now. */
	if ((id & KVM_REG_ARM_COPROC_MASK) >> KVM_REG_ARM_COPROC_SHIFT != 15)
		return NULL;

	if (!index_to_params(id, &params))
		return NULL;

	table = get_target_table(vcpu->arch.target, &num);
	r = find_reg(&params, table, num);
	if (!r)
		r = find_reg(&params, cp15_regs, ARRAY_SIZE(cp15_regs));

	/* Not saved in the cp15 array? */
	if (r && !r->reg)
		r = NULL;

	return r;
}
/*
 * These are the invariant cp15 registers: we let the guest see the host
 * versions of these, so they're part of the guest state.
 *
 * A future CPU may provide a mechanism to present different values to
 * the guest, or a future kvm may trap them.
 */
/* Unfortunately, there's no register-argument for mrc, so generate. */
#define FUNCTION_FOR32(crn, crm, op1, op2, name)			\
	static void get_##name(struct kvm_vcpu *v,			\
			       const struct coproc_reg *r)		\
	{								\
		u32 val;						\
									\
		asm volatile("mrc p15, " __stringify(op1)		\
			     ", %0, c" __stringify(crn)			\
			     ", c" __stringify(crm)			\
			     ", " __stringify(op2) "\n" : "=r" (val));	\
		((struct coproc_reg *)r)->val = val;			\
	}
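
/*
 * For example, FUNCTION_FOR32(0, 0, 0, 0, MIDR) below expands to a
 * get_MIDR() helper that executes "mrc p15, 0, %0, c0, c0, 0" and
 * stashes the host's MIDR value in the table entry's ->val field.
 */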
FUNCTION_FOR32(0, 0, 0, 0, MIDR)
FUNCTION_FOR32(0, 0, 0, 1, CTR)
FUNCTION_FOR32(0, 0, 0, 2, TCMTR)
FUNCTION_FOR32(0, 0, 0, 3, TLBTR)
FUNCTION_FOR32(0, 0, 0, 6, REVIDR)
FUNCTION_FOR32(0, 1, 0, 0, ID_PFR0)
FUNCTION_FOR32(0, 1, 0, 1, ID_PFR1)
FUNCTION_FOR32(0, 1, 0, 2, ID_DFR0)
FUNCTION_FOR32(0, 1, 0, 3, ID_AFR0)
FUNCTION_FOR32(0, 1, 0, 4, ID_MMFR0)
FUNCTION_FOR32(0, 1, 0, 5, ID_MMFR1)
FUNCTION_FOR32(0, 1, 0, 6, ID_MMFR2)
FUNCTION_FOR32(0, 1, 0, 7, ID_MMFR3)
FUNCTION_FOR32(0, 2, 0, 0, ID_ISAR0)
FUNCTION_FOR32(0, 2, 0, 1, ID_ISAR1)
FUNCTION_FOR32(0, 2, 0, 2, ID_ISAR2)
FUNCTION_FOR32(0, 2, 0, 3, ID_ISAR3)
FUNCTION_FOR32(0, 2, 0, 4, ID_ISAR4)
FUNCTION_FOR32(0, 2, 0, 5, ID_ISAR5)
FUNCTION_FOR32(0, 0, 1, 1, CLIDR)
FUNCTION_FOR32(0, 0, 1, 7, AIDR)
/* ->val is filled in by kvm_invariant_coproc_table_init() */
static struct coproc_reg invariant_cp15[] = {
	{ CRn( 0), CRm( 0), Op1( 0), Op2( 0), is32, NULL, get_MIDR },
	{ CRn( 0), CRm( 0), Op1( 0), Op2( 1), is32, NULL, get_CTR },
	{ CRn( 0), CRm( 0), Op1( 0), Op2( 2), is32, NULL, get_TCMTR },
	{ CRn( 0), CRm( 0), Op1( 0), Op2( 3), is32, NULL, get_TLBTR },
	{ CRn( 0), CRm( 0), Op1( 0), Op2( 6), is32, NULL, get_REVIDR },

	{ CRn( 0), CRm( 0), Op1( 1), Op2( 1), is32, NULL, get_CLIDR },
	{ CRn( 0), CRm( 0), Op1( 1), Op2( 7), is32, NULL, get_AIDR },

	{ CRn( 0), CRm( 1), Op1( 0), Op2( 0), is32, NULL, get_ID_PFR0 },
	{ CRn( 0), CRm( 1), Op1( 0), Op2( 1), is32, NULL, get_ID_PFR1 },
	{ CRn( 0), CRm( 1), Op1( 0), Op2( 2), is32, NULL, get_ID_DFR0 },
	{ CRn( 0), CRm( 1), Op1( 0), Op2( 3), is32, NULL, get_ID_AFR0 },
	{ CRn( 0), CRm( 1), Op1( 0), Op2( 4), is32, NULL, get_ID_MMFR0 },
	{ CRn( 0), CRm( 1), Op1( 0), Op2( 5), is32, NULL, get_ID_MMFR1 },
	{ CRn( 0), CRm( 1), Op1( 0), Op2( 6), is32, NULL, get_ID_MMFR2 },
	{ CRn( 0), CRm( 1), Op1( 0), Op2( 7), is32, NULL, get_ID_MMFR3 },

	{ CRn( 0), CRm( 2), Op1( 0), Op2( 0), is32, NULL, get_ID_ISAR0 },
	{ CRn( 0), CRm( 2), Op1( 0), Op2( 1), is32, NULL, get_ID_ISAR1 },
	{ CRn( 0), CRm( 2), Op1( 0), Op2( 2), is32, NULL, get_ID_ISAR2 },
	{ CRn( 0), CRm( 2), Op1( 0), Op2( 3), is32, NULL, get_ID_ISAR3 },
	{ CRn( 0), CRm( 2), Op1( 0), Op2( 4), is32, NULL, get_ID_ISAR4 },
	{ CRn( 0), CRm( 2), Op1( 0), Op2( 5), is32, NULL, get_ID_ISAR5 },
};
/*
 * Reads a register value from a userspace address to a kernel
 * variable. Make sure that register size matches sizeof(*__val).
 */
static int reg_from_user(void *val, const void __user *uaddr, u64 id)
{
	if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
		return -EFAULT;
	return 0;
}

/*
 * Writes a register value to a userspace address from a kernel variable.
 * Make sure that register size matches sizeof(*__val).
 */
static int reg_to_user(void __user *uaddr, const void *val, u64 id)
{
	if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
		return -EFAULT;
	return 0;
}
static int get_invariant_cp15(u64 id, void __user *uaddr)
{
	struct coproc_params params;
	const struct coproc_reg *r;
	int ret;

	if (!index_to_params(id, &params))
		return -ENOENT;

	r = find_reg(&params, invariant_cp15, ARRAY_SIZE(invariant_cp15));
	if (!r)
		return -ENOENT;

	ret = -ENOENT;
	if (KVM_REG_SIZE(id) == 4) {
		u32 val = r->val;

		ret = reg_to_user(uaddr, &val, id);
	} else if (KVM_REG_SIZE(id) == 8) {
		ret = reg_to_user(uaddr, &r->val, id);
	}
	return ret;
}
static int set_invariant_cp15(u64 id, void __user *uaddr)
{
	struct coproc_params params;
	const struct coproc_reg *r;
	int err;
	u64 val;

	if (!index_to_params(id, &params))
		return -ENOENT;
	r = find_reg(&params, invariant_cp15, ARRAY_SIZE(invariant_cp15));
	if (!r)
		return -ENOENT;

	err = -ENOENT;
	if (KVM_REG_SIZE(id) == 4) {
		u32 val32;

		err = reg_from_user(&val32, uaddr, id);
		if (!err)
			val = val32;
	} else if (KVM_REG_SIZE(id) == 8) {
		err = reg_from_user(&val, uaddr, id);
	}
	if (err)
		return err;

	/* This is what we mean by invariant: you can't change it. */
	if (r->val != val)
		return -EINVAL;

	return 0;
}
static bool is_valid_cache(u32 val)
{
	u32 level, ctype;

	if (val >= CSSELR_MAX)
		return false;

	/* Bottom bit is Instruction or Data bit.  Next 3 bits are level. */
	level = (val >> 1);
	ctype = (cache_levels >> (level * 3)) & 7;

	switch (ctype) {
	case 0: /* No cache */
		return false;
	case 1: /* Instruction cache only */
		return (val & 1);
	case 2: /* Data cache only */
	case 4: /* Unified cache */
		return !(val & 1);
	case 3: /* Separate instruction and data caches */
		return true;
	default: /* Reserved: we can't know instruction or data. */
		return false;
	}
}
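
/*
 * Example (illustrative): with separate L1 I/D caches and a unified L2,
 * cache_levels holds Ctype1 == 3 and Ctype2 == 4, so the valid CSSELR
 * values are 0 (L1 data), 1 (L1 instruction) and 2 (L2 unified), while
 * 3 (an "instruction" view of the unified L2) is rejected.
 */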
/* Which cache CCSIDR represents depends on CSSELR value. */
static u32 get_ccsidr(u32 csselr)
{
	u32 ccsidr;

	/* Make sure noone else changes CSSELR during this! */
	local_irq_disable();
	/* Put value into CSSELR */
	asm volatile("mcr p15, 2, %0, c0, c0, 0" : : "r" (csselr));
	isb();
	/* Read result out of CCSIDR */
	asm volatile("mrc p15, 1, %0, c0, c0, 0" : "=r" (ccsidr));
	local_irq_enable();

	return ccsidr;
}
static int demux_c15_get(u64 id, void __user *uaddr)
{
	u32 val;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (!is_valid_cache(val))
			return -ENOENT;

		return put_user(get_ccsidr(val), uval);
	default:
		return -ENOENT;
	}
}
static int demux_c15_set(u64 id, void __user *uaddr)
{
	u32 val, newval;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (!is_valid_cache(val))
			return -ENOENT;

		if (get_user(newval, uval))
			return -EFAULT;

		/* This is also invariant: you can't change it. */
		if (newval != get_ccsidr(val))
			return -EINVAL;
		return 0;
	default:
		return -ENOENT;
	}
}
#ifdef CONFIG_VFPv3
static const int vfp_sysregs[] = { KVM_REG_ARM_VFP_FPEXC,
				   KVM_REG_ARM_VFP_FPSCR,
				   KVM_REG_ARM_VFP_FPINST,
				   KVM_REG_ARM_VFP_FPINST2,
				   KVM_REG_ARM_VFP_MVFR0,
				   KVM_REG_ARM_VFP_MVFR1,
				   KVM_REG_ARM_VFP_FPSID };
static unsigned int num_fp_regs(void)
{
	if (((fmrx(MVFR0) & MVFR0_A_SIMD_MASK) >> MVFR0_A_SIMD_BIT) == 2)
		return 32;
	else
		return 16;
}

static unsigned int num_vfp_regs(void)
{
	/* Normal FP regs + control regs. */
	return num_fp_regs() + ARRAY_SIZE(vfp_sysregs);
}
static int copy_vfp_regids(u64 __user *uindices)
{
	unsigned int i;
	const u64 u32reg = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP;
	const u64 u64reg = KVM_REG_ARM | KVM_REG_SIZE_U64 | KVM_REG_ARM_VFP;

	for (i = 0; i < num_fp_regs(); i++) {
		if (put_user((u64reg | KVM_REG_ARM_VFP_BASE_REG) + i,
			     uindices))
			return -EFAULT;
		uindices++;
	}

	for (i = 0; i < ARRAY_SIZE(vfp_sysregs); i++) {
		if (put_user(u32reg | vfp_sysregs[i], uindices))
			return -EFAULT;
		uindices++;
	}

	return num_vfp_regs();
}
static int vfp_get_reg(const struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
{
	u32 vfpid = (id & KVM_REG_ARM_VFP_MASK);
	u32 val;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	if (vfpid < num_fp_regs()) {
		if (KVM_REG_SIZE(id) != 8)
			return -ENOENT;
		return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpregs[vfpid],
				   id);
	}

	/* FP control registers are all 32 bit. */
	if (KVM_REG_SIZE(id) != 4)
		return -ENOENT;

	switch (vfpid) {
	case KVM_REG_ARM_VFP_FPEXC:
		return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpexc, id);
	case KVM_REG_ARM_VFP_FPSCR:
		return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpscr, id);
	case KVM_REG_ARM_VFP_FPINST:
		return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpinst, id);
	case KVM_REG_ARM_VFP_FPINST2:
		return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpinst2, id);
	case KVM_REG_ARM_VFP_MVFR0:
		val = fmrx(MVFR0);
		return reg_to_user(uaddr, &val, id);
	case KVM_REG_ARM_VFP_MVFR1:
		val = fmrx(MVFR1);
		return reg_to_user(uaddr, &val, id);
	case KVM_REG_ARM_VFP_FPSID:
		val = fmrx(FPSID);
		return reg_to_user(uaddr, &val, id);
	default:
		return -ENOENT;
	}
}
static int vfp_set_reg(struct kvm_vcpu *vcpu, u64 id, const void __user *uaddr)
{
	u32 vfpid = (id & KVM_REG_ARM_VFP_MASK);
	u32 val;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	if (vfpid < num_fp_regs()) {
		if (KVM_REG_SIZE(id) != 8)
			return -ENOENT;
		return reg_from_user(&vcpu->arch.ctxt.vfp.fpregs[vfpid],
				     uaddr, id);
	}

	/* FP control registers are all 32 bit. */
	if (KVM_REG_SIZE(id) != 4)
		return -ENOENT;

	switch (vfpid) {
	case KVM_REG_ARM_VFP_FPEXC:
		return reg_from_user(&vcpu->arch.ctxt.vfp.fpexc, uaddr, id);
	case KVM_REG_ARM_VFP_FPSCR:
		return reg_from_user(&vcpu->arch.ctxt.vfp.fpscr, uaddr, id);
	case KVM_REG_ARM_VFP_FPINST:
		return reg_from_user(&vcpu->arch.ctxt.vfp.fpinst, uaddr, id);
	case KVM_REG_ARM_VFP_FPINST2:
		return reg_from_user(&vcpu->arch.ctxt.vfp.fpinst2, uaddr, id);
	/* These are invariant. */
	case KVM_REG_ARM_VFP_MVFR0:
		if (reg_from_user(&val, uaddr, id))
			return -EFAULT;
		if (val != fmrx(MVFR0))
			return -EINVAL;
		return 0;
	case KVM_REG_ARM_VFP_MVFR1:
		if (reg_from_user(&val, uaddr, id))
			return -EFAULT;
		if (val != fmrx(MVFR1))
			return -EINVAL;
		return 0;
	case KVM_REG_ARM_VFP_FPSID:
		if (reg_from_user(&val, uaddr, id))
			return -EFAULT;
		if (val != fmrx(FPSID))
			return -EINVAL;
		return 0;
	default:
		return -ENOENT;
	}
}
#else /* !CONFIG_VFPv3 */
static unsigned int num_vfp_regs(void)
{
	return 0;
}

static int copy_vfp_regids(u64 __user *uindices)
{
	return 0;
}

static int vfp_get_reg(const struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
{
	return -ENOENT;
}

static int vfp_set_reg(struct kvm_vcpu *vcpu, u64 id, const void __user *uaddr)
{
	return -ENOENT;
}
#endif /* !CONFIG_VFPv3 */
int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	const struct coproc_reg *r;
	void __user *uaddr = (void __user *)(long)reg->addr;
	int ret;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_get(reg->id, uaddr);

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_VFP)
		return vfp_get_reg(vcpu, reg->id, uaddr);

	r = index_to_coproc_reg(vcpu, reg->id);
	if (!r)
		return get_invariant_cp15(reg->id, uaddr);

	ret = -ENOENT;
	if (KVM_REG_SIZE(reg->id) == 8) {
		u64 val;

		val = vcpu_cp15_reg64_get(vcpu, r);
		ret = reg_to_user(uaddr, &val, reg->id);
	} else if (KVM_REG_SIZE(reg->id) == 4) {
		ret = reg_to_user(uaddr, &vcpu_cp15(vcpu, r->reg), reg->id);
	}

	return ret;
}
int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	const struct coproc_reg *r;
	void __user *uaddr = (void __user *)(long)reg->addr;
	int ret;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_set(reg->id, uaddr);

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_VFP)
		return vfp_set_reg(vcpu, reg->id, uaddr);

	r = index_to_coproc_reg(vcpu, reg->id);
	if (!r)
		return set_invariant_cp15(reg->id, uaddr);

	ret = -ENOENT;
	if (KVM_REG_SIZE(reg->id) == 8) {
		u64 val;

		ret = reg_from_user(&val, uaddr, reg->id);
		if (!ret)
			vcpu_cp15_reg64_set(vcpu, r, val);
	} else if (KVM_REG_SIZE(reg->id) == 4) {
		ret = reg_from_user(&vcpu_cp15(vcpu, r->reg), uaddr, reg->id);
	}

	return ret;
}
static unsigned int num_demux_regs(void)
{
	unsigned int i, count = 0;

	for (i = 0; i < CSSELR_MAX; i++)
		if (is_valid_cache(i))
			count++;

	return count;
}
static int write_demux_regids(u64 __user *uindices)
{
	u64 val = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
	unsigned int i;

	val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
	for (i = 0; i < CSSELR_MAX; i++) {
		if (!is_valid_cache(i))
			continue;
		if (put_user(val | i, uindices))
			return -EFAULT;
		uindices++;
	}
	return 0;
}
static u64 cp15_to_index(const struct coproc_reg *reg)
{
	u64 val = KVM_REG_ARM | (15 << KVM_REG_ARM_COPROC_SHIFT);
	if (reg->is_64bit) {
		val |= KVM_REG_SIZE_U64;
		val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT);
		/*
		 * CRn always denotes the primary coproc. reg. nr. for the
		 * in-kernel representation, but the user space API uses the
		 * CRm for the encoding, because it is modelled after the
		 * MRRC/MCRR instructions: see the ARM ARM rev. c page
		 * B3-1445
		 */
		val |= (reg->CRn << KVM_REG_ARM_CRM_SHIFT);
	} else {
		val |= KVM_REG_SIZE_U32;
		val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT);
		val |= (reg->Op2 << KVM_REG_ARM_32_OPC2_SHIFT);
		val |= (reg->CRm << KVM_REG_ARM_CRM_SHIFT);
		val |= (reg->CRn << KVM_REG_ARM_32_CRN_SHIFT);
	}
	return val;
}
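
/*
 * Example (illustrative): the 64-bit TTBR0 entry (CRm64(2), Op1(0)) is
 * therefore exported to user space as
 * KVM_REG_ARM | KVM_REG_SIZE_U64 | (15 << KVM_REG_ARM_COPROC_SHIFT)
 * | (2 << KVM_REG_ARM_CRM_SHIFT), the in-kernel CRn supplying the CRm
 * field of the index as described in the comment above.
 */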
static bool copy_reg_to_user(const struct coproc_reg *reg, u64 __user **uind)
{
	if (!*uind)
		return true;

	if (put_user(cp15_to_index(reg), *uind))
		return false;

	(*uind)++;
	return true;
}
/* Assumed ordered tables, see kvm_coproc_table_init. */
static int walk_cp15(struct kvm_vcpu *vcpu, u64 __user *uind)
{
	const struct coproc_reg *i1, *i2, *end1, *end2;
	unsigned int total = 0;
	size_t num;

	/* We check for duplicates here, to allow arch-specific overrides. */
	i1 = get_target_table(vcpu->arch.target, &num);
	end1 = i1 + num;
	i2 = cp15_regs;
	end2 = cp15_regs + ARRAY_SIZE(cp15_regs);

	BUG_ON(i1 == end1 || i2 == end2);

	/* Walk carefully, as both tables may refer to the same register. */
	while (i1 || i2) {
		int cmp = cmp_reg(i1, i2);
		/* target-specific overrides generic entry. */
		if (cmp <= 0) {
			/* Ignore registers we trap but don't save. */
			if (i1->reg) {
				if (!copy_reg_to_user(i1, &uind))
					return -EFAULT;
				total++;
			}
		} else {
			/* Ignore registers we trap but don't save. */
			if (i2->reg) {
				if (!copy_reg_to_user(i2, &uind))
					return -EFAULT;
				total++;
			}
		}

		if (cmp <= 0 && ++i1 == end1)
			i1 = NULL;
		if (cmp >= 0 && ++i2 == end2)
			i2 = NULL;
	}
	return total;
}
unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu)
{
	return ARRAY_SIZE(invariant_cp15)
		+ num_demux_regs()
		+ num_vfp_regs()
		+ walk_cp15(vcpu, (u64 __user *)NULL);
}
int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	unsigned int i;
	int err;

	/* Then give them all the invariant registers' indices. */
	for (i = 0; i < ARRAY_SIZE(invariant_cp15); i++) {
		if (put_user(cp15_to_index(&invariant_cp15[i]), uindices))
			return -EFAULT;
		uindices++;
	}

	err = walk_cp15(vcpu, uindices);
	if (err < 0)
		return err;
	uindices += err;

	err = copy_vfp_regids(uindices);
	if (err < 0)
		return err;
	uindices += err;

	return write_demux_regids(uindices);
}
void kvm_coproc_table_init(void)
{
	unsigned int i;

	/* Make sure tables are unique and in order. */
	BUG_ON(check_reg_table(cp15_regs, ARRAY_SIZE(cp15_regs)));
	BUG_ON(check_reg_table(invariant_cp15, ARRAY_SIZE(invariant_cp15)));

	/* We abuse the reset function to overwrite the table itself. */
	for (i = 0; i < ARRAY_SIZE(invariant_cp15); i++)
		invariant_cp15[i].reset(NULL, &invariant_cp15[i]);

	/*
	 * CLIDR format is awkward, so clean it up.  See ARM B4.1.20:
	 *
	 *   If software reads the Cache Type fields from Ctype1
	 *   upwards, once it has seen a value of 0b000, no caches
	 *   exist at further-out levels of the hierarchy. So, for
	 *   example, if Ctype3 is the first Cache Type field with a
	 *   value of 0b000, the values of Ctype4 to Ctype7 must be
	 *   ignored.
	 */
	asm volatile("mrc p15, 1, %0, c0, c0, 1" : "=r" (cache_levels));
	for (i = 0; i < 7; i++)
		if (((cache_levels >> (i*3)) & 7) == 0)
			break;
	/* Clear all higher bits. */
	cache_levels &= (1 << (i*3))-1;
}
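
/*
 * Example (illustrative): if the host CLIDR reports Ctype1 == 3 and
 * Ctype2 == 0, the loop above stops at i == 1 and the mask keeps only
 * bits 2:0, so cache_levels then describes a single level of separate
 * instruction and data caches.
 */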
/**
 * kvm_reset_coprocs - sets cp15 registers to reset value
 * @vcpu: The VCPU pointer
 *
 * This function finds the right table above and sets the registers on the
 * virtual CPU struct to their architecturally defined reset values.
 */
void kvm_reset_coprocs(struct kvm_vcpu *vcpu)
{
	size_t num;
	const struct coproc_reg *table;

	/* Catch someone adding a register without putting in reset entry. */
	memset(vcpu->arch.ctxt.cp15, 0x42, sizeof(vcpu->arch.ctxt.cp15));

	/* Generic chip reset first (so target could override). */
	reset_coproc_regs(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs));

	table = get_target_table(vcpu->arch.target, &num);
	reset_coproc_regs(vcpu, table, num);

	for (num = 1; num < NR_CP15_REGS; num++)
		if (vcpu_cp15(vcpu, num) == 0x42424242)
			panic("Didn't reset vcpu_cp15(vcpu, %zi)", num);
}