/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/asm/kvm_host.h:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ARM64_KVM_HOST_H__
#define __ARM64_KVM_HOST_H__

#include <linux/types.h>
#include <linux/kvm_types.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmio.h>

#define __KVM_HAVE_ARCH_INTC_INITIALIZED

#define KVM_USER_MEM_SLOTS 512
#define KVM_HALT_POLL_NS_DEFAULT 500000

#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>
#include <kvm/arm_pmu.h>

#define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS

#define KVM_VCPU_MAX_FEATURES 4

#define KVM_REQ_VCPU_EXIT \
	KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)

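/*
 * Illustrative usage (not part of the original header): KVM_REQ_VCPU_EXIT is
 * handled with the generic vcpu-request helpers from <linux/kvm_host.h>.
 * A requester typically does something like
 *
 *	kvm_make_request(KVM_REQ_VCPU_EXIT, vcpu);
 *	kvm_vcpu_kick(vcpu);
 *
 * (or kvm_make_all_cpus_request(kvm, KVM_REQ_VCPU_EXIT) for the whole VM),
 * and the vcpu run loop consumes it with
 * kvm_check_request(KVM_REQ_VCPU_EXIT, vcpu) before re-entering the guest.
 * The real call sites are in the shared arm/arm64 KVM code; this is only a
 * sketch of the request pattern.
 */
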
int __attribute_const__ kvm_target_cpu(void);
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
int kvm_arch_dev_ioctl_check_extension(struct kvm *kvm, long ext);
void __extended_idmap_trampoline(phys_addr_t boot_pgd, phys_addr_t idmap_start);

struct kvm_arch {
	/* The VMID generation used for the virt. memory system */
	u64 vmid_gen;
	u32 vmid;

	/* 1-level 2nd stage table and lock */
	spinlock_t pgd_lock;
	pgd_t *pgd;

	/* VTTBR value associated with above pgd and vmid */
	u64 vttbr;

	/* The last vcpu id that ran on each physical CPU */
	int __percpu *last_vcpu_ran;

	/* The maximum number of vCPUs depends on the used GIC model */
	int max_vcpus;

	/* Interrupt controller */
	struct vgic_dist vgic;
};

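/*
 * Illustrative note (not in the original header): vttbr is the value
 * programmed into VTTBR_EL2 on guest entry. Assuming the usual composition
 * done by the shared ARM KVM code (update_vttbr()), it packs the stage-2 pgd
 * physical address together with the VMID, roughly:
 *
 *	kvm->arch.vttbr = pgd_phys | ((u64)kvm->arch.vmid << VTTBR_VMID_SHIFT);
 *
 * The real code also masks the VMID to the supported width; the helper name
 * and exact masking are assumptions, shown here only as a sketch.
 */
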
#define KVM_NR_MEM_OBJS 40

/*
 * We don't want allocation failures within the mmu code, so we preallocate
 * enough memory for a single page fault in a cache.
 */
struct kvm_mmu_memory_cache {
	int nobjs;
	void *objects[KVM_NR_MEM_OBJS];
};

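/*
 * Illustrative sketch (not in the original header): the cache is topped up
 * outside the spinlocked fault path and then consumed while the lock is held,
 * so allocations never sleep or fail under the lock. Assuming helpers along
 * the lines of those in the arch MMU code (mmu_topup_memory_cache() and
 * mmu_memory_cache_alloc()), usage looks roughly like:
 *
 *	ret = mmu_topup_memory_cache(&vcpu->arch.mmu_page_cache, min, max);
 *	if (ret)
 *		return ret;
 *	spin_lock(&kvm->mmu_lock);
 *	new_pte = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache);
 *	spin_unlock(&kvm->mmu_lock);
 *
 * The helper names and arguments are assumptions; only the pattern
 * (preallocate, then allocate without sleeping) is the point.
 */
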
struct kvm_vcpu_fault_info {
	u32 esr_el2;		/* Hyp Syndrome Register */
	u64 far_el2;		/* Hyp Fault Address Register */
	u64 hpfar_el2;		/* Hyp IPA Fault Address Register */
};

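/*
 * Illustrative note (not in the original header): hpfar_el2 stores the
 * faulting intermediate physical address with the page offset removed and
 * shifted down by 8 (HPFAR_EL2[39:4] = IPA[47:12]). The accessor in
 * asm/kvm_emulate.h recovers the IPA roughly as:
 *
 *	return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
 *
 * Treat the exact mask and accessor name (kvm_vcpu_get_fault_ipa()) as
 * assumptions drawn from the surrounding KVM code.
 */
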
/*
 * 0 is reserved as an invalid value.
 * Order should be kept in sync with the save/restore code.
 */
enum vcpu_sysreg {
	__INVALID_SYSREG__,
	MPIDR_EL1,	/* MultiProcessor Affinity Register */
	CSSELR_EL1,	/* Cache Size Selection Register */
	SCTLR_EL1,	/* System Control Register */
	ACTLR_EL1,	/* Auxiliary Control Register */
	CPACR_EL1,	/* Coprocessor Access Control */
	TTBR0_EL1,	/* Translation Table Base Register 0 */
	TTBR1_EL1,	/* Translation Table Base Register 1 */
	TCR_EL1,	/* Translation Control Register */
	ESR_EL1,	/* Exception Syndrome Register */
	AFSR0_EL1,	/* Auxiliary Fault Status Register 0 */
	AFSR1_EL1,	/* Auxiliary Fault Status Register 1 */
	FAR_EL1,	/* Fault Address Register */
	MAIR_EL1,	/* Memory Attribute Indirection Register */
	VBAR_EL1,	/* Vector Base Address Register */
	CONTEXTIDR_EL1,	/* Context ID Register */
	TPIDR_EL0,	/* Thread ID, User R/W */
	TPIDRRO_EL0,	/* Thread ID, User R/O */
	TPIDR_EL1,	/* Thread ID, Privileged */
	AMAIR_EL1,	/* Aux Memory Attribute Indirection Register */
	CNTKCTL_EL1,	/* Timer Control Register (EL1) */
	PAR_EL1,	/* Physical Address Register */
	MDSCR_EL1,	/* Monitor Debug System Control Register */
	MDCCINT_EL1,	/* Monitor Debug Comms Channel Interrupt Enable Reg */

	/* Performance Monitors Registers */
	PMCR_EL0,	/* Control Register */
	PMSELR_EL0,	/* Event Counter Selection Register */
	PMEVCNTR0_EL0,	/* Event Counter Register (0-30) */
	PMEVCNTR30_EL0 = PMEVCNTR0_EL0 + 30,
	PMCCNTR_EL0,	/* Cycle Counter Register */
	PMEVTYPER0_EL0,	/* Event Type Register (0-30) */
	PMEVTYPER30_EL0 = PMEVTYPER0_EL0 + 30,
	PMCCFILTR_EL0,	/* Cycle Count Filter Register */
	PMCNTENSET_EL0,	/* Count Enable Set Register */
	PMINTENSET_EL1,	/* Interrupt Enable Set Register */
	PMOVSSET_EL0,	/* Overflow Flag Status Set Register */
	PMSWINC_EL0,	/* Software Increment Register */
	PMUSERENR_EL0,	/* User Enable Register */

	/* 32bit specific registers. Keep them at the end of the range */
	DACR32_EL2,	/* Domain Access Control Register */
	IFSR32_EL2,	/* Instruction Fault Status Register */
	FPEXC32_EL2,	/* Floating-Point Exception Control Register */
	DBGVCR32_EL2,	/* Debug Vector Catch Register */

	NR_SYS_REGS	/* Nothing after this line! */
};

/* 32bit mapping */
#define c0_MPIDR	(MPIDR_EL1 * 2)	/* MultiProcessor ID Register */
#define c0_CSSELR	(CSSELR_EL1 * 2)/* Cache Size Selection Register */
#define c1_SCTLR	(SCTLR_EL1 * 2)	/* System Control Register */
#define c1_ACTLR	(ACTLR_EL1 * 2)	/* Auxiliary Control Register */
#define c1_CPACR	(CPACR_EL1 * 2)	/* Coprocessor Access Control */
#define c2_TTBR0	(TTBR0_EL1 * 2)	/* Translation Table Base Register 0 */
#define c2_TTBR0_high	(c2_TTBR0 + 1)	/* TTBR0 top 32 bits */
#define c2_TTBR1	(TTBR1_EL1 * 2)	/* Translation Table Base Register 1 */
#define c2_TTBR1_high	(c2_TTBR1 + 1)	/* TTBR1 top 32 bits */
#define c2_TTBCR	(TCR_EL1 * 2)	/* Translation Table Base Control R. */
#define c3_DACR		(DACR32_EL2 * 2)/* Domain Access Control Register */
#define c5_DFSR		(ESR_EL1 * 2)	/* Data Fault Status Register */
#define c5_IFSR		(IFSR32_EL2 * 2)/* Instruction Fault Status Register */
#define c5_ADFSR	(AFSR0_EL1 * 2)	/* Auxiliary Data Fault Status R */
#define c5_AIFSR	(AFSR1_EL1 * 2)	/* Auxiliary Instr Fault Status R */
#define c6_DFAR		(FAR_EL1 * 2)	/* Data Fault Address Register */
#define c6_IFAR		(c6_DFAR + 1)	/* Instruction Fault Address Register */
#define c7_PAR		(PAR_EL1 * 2)	/* Physical Address Register */
#define c7_PAR_high	(c7_PAR + 1)	/* PAR top 32 bits */
#define c10_PRRR	(MAIR_EL1 * 2)	/* Primary Region Remap Register */
#define c10_NMRR	(c10_PRRR + 1)	/* Normal Memory Remap Register */
#define c12_VBAR	(VBAR_EL1 * 2)	/* Vector Base Address Register */
#define c13_CID		(CONTEXTIDR_EL1 * 2)	/* Context ID Register */
#define c13_TID_URW	(TPIDR_EL0 * 2)	/* Thread ID, User R/W */
#define c13_TID_URO	(TPIDRRO_EL0 * 2)/* Thread ID, User R/O */
#define c13_TID_PRIV	(TPIDR_EL1 * 2)	/* Thread ID, Privileged */
#define c10_AMAIR0	(AMAIR_EL1 * 2)	/* Aux Memory Attr Indirection Reg */
#define c10_AMAIR1	(c10_AMAIR0 + 1)/* Aux Memory Attr Indirection Reg */
#define c14_CNTKCTL	(CNTKCTL_EL1 * 2) /* Timer Control Register (PL1) */

#define cp14_DBGDSCRext	(MDSCR_EL1 * 2)
#define cp14_DBGBCR0	(DBGBCR0_EL1 * 2)
#define cp14_DBGBVR0	(DBGBVR0_EL1 * 2)
#define cp14_DBGBXVR0	(cp14_DBGBVR0 + 1)
#define cp14_DBGWCR0	(DBGWCR0_EL1 * 2)
#define cp14_DBGWVR0	(DBGWVR0_EL1 * 2)
#define cp14_DBGDCCINT	(MDCCINT_EL1 * 2)

#define NR_COPRO_REGS	(NR_SYS_REGS * 2)

struct kvm_cpu_context {
	struct kvm_regs	gp_regs;
	union {
		u64 sys_regs[NR_SYS_REGS];
		u32 copro[NR_COPRO_REGS];
	};
};

typedef struct kvm_cpu_context kvm_cpu_context_t;

struct kvm_vcpu_arch {
	struct kvm_cpu_context ctxt;

	/* HYP configuration */
	u64 hcr_el2;
	u32 mdcr_el2;

	/* Exception Information */
	struct kvm_vcpu_fault_info fault;

	/* Guest debug state */
	u64 debug_flags;

	/*
	 * We maintain more than a single set of debug registers to support
	 * debugging the guest from the host and to maintain separate host and
	 * guest state during world switches. vcpu_debug_state are the debug
	 * registers of the vcpu as the guest sees them. host_debug_state are
	 * the host registers which are saved and restored during
	 * world switches. external_debug_state contains the debug register
	 * values the host wants to use while debugging the guest; it is set
	 * via the KVM_SET_GUEST_DEBUG ioctl.
	 *
	 * debug_ptr points to the set of debug registers that should be loaded
	 * onto the hardware when running the guest.
	 */
	struct kvm_guest_debug_arch *debug_ptr;
	struct kvm_guest_debug_arch vcpu_debug_state;
	struct kvm_guest_debug_arch external_debug_state;

	/* Pointer to host CPU context */
	kvm_cpu_context_t *host_cpu_context;
	struct {
		/* {Break,watch}point registers */
		struct kvm_guest_debug_arch regs;
		/* Statistical profiling extension */
		u64 pmscr_el1;
	} host_debug_state;

	/* VGIC state */
	struct vgic_cpu vgic_cpu;
	struct arch_timer_cpu timer_cpu;
	struct kvm_pmu pmu;

	/*
	 * Anything that is not used directly from assembly code goes
	 * here.
	 */

	/*
	 * Guest registers we preserve during guest debugging.
	 *
	 * These shadow registers are updated by the kvm_handle_sys_reg
	 * trap handler if the guest accesses or updates them while we
	 * are using guest debug.
	 */
	struct {
		u32 mdscr_el1;
	} guest_debug_preserved;

	/* vcpu power-off state */
	bool power_off;

	/* Don't run the guest (internal implementation need) */
	bool pause;

	/* IO related fields */
	struct kvm_decode mmio_decode;

	/* Interrupt related fields */
	u64 irq_lines;		/* IRQ and FIQ levels */

	/* Cache some mmu pages needed inside spinlock regions */
	struct kvm_mmu_memory_cache mmu_page_cache;

	/* Target CPU and feature flags */
	int target;
	DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);

	/* Detect first run of a vcpu */
	bool has_run_once;
};

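/*
 * Illustrative sketch (not in the original header): debug_ptr normally
 * tracks vcpu_debug_state, and the debug setup code switches it to
 * external_debug_state when userspace asks for hardware-assisted debug.
 * Assuming the logic in arch/arm64/kvm/debug.c, this is roughly:
 *
 *	vcpu->arch.debug_ptr = &vcpu->arch.vcpu_debug_state;
 *	if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW)
 *		vcpu->arch.debug_ptr = &vcpu->arch.external_debug_state;
 *
 * The exact conditions are assumptions; see kvm_arm_setup_debug() and
 * kvm_arm_reset_debug_ptr() declared later in this header.
 */
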
#define vcpu_gp_regs(v)		(&(v)->arch.ctxt.gp_regs)
#define vcpu_sys_reg(v,r)	((v)->arch.ctxt.sys_regs[(r)])
/*
 * CP14 and CP15 live in the same array, as they are backed by the
 * same system registers.
 */
#define vcpu_cp14(v,r)		((v)->arch.ctxt.copro[(r)])
#define vcpu_cp15(v,r)		((v)->arch.ctxt.copro[(r)])

#ifdef CONFIG_CPU_BIG_ENDIAN
#define vcpu_cp15_64_high(v,r)	vcpu_cp15((v),(r))
#define vcpu_cp15_64_low(v,r)	vcpu_cp15((v),(r) + 1)
#else
#define vcpu_cp15_64_high(v,r)	vcpu_cp15((v),(r) + 1)
#define vcpu_cp15_64_low(v,r)	vcpu_cp15((v),(r))
#endif

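/*
 * Illustrative note (not in the original header): each 64-bit sys_regs[]
 * slot doubles as two 32-bit copro[] words, which is why the c*_ indices
 * above are all "EL1 index * 2". For example, an AArch64 trap handler
 * writes a register with
 *
 *	vcpu_sys_reg(vcpu, TTBR0_EL1) = val;
 *
 * while an AArch32 access to the 64-bit TTBR0 updates the same storage via
 *
 *	vcpu_cp15_64_low(vcpu, c2_TTBR0) = lower_32_bits(val);
 *	vcpu_cp15_64_high(vcpu, c2_TTBR0) = upper_32_bits(val);
 *
 * This mirrors how sys_regs.c uses these macros; treat it as a sketch, not
 * a quote of the actual handlers.
 */
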
struct kvm_vm_stat {
	ulong remote_tlb_flush;
};

struct kvm_vcpu_stat {
	u64 halt_successful_poll;
	u64 halt_attempted_poll;
	u64 halt_poll_invalid;
	u64 halt_wakeup;
	u64 hvc_exit_stat;
	u64 wfe_exit_stat;
	u64 wfi_exit_stat;
	u64 mmio_exit_user;
	u64 mmio_exit_kernel;
	u64 exits;
};

int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);

#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
int kvm_unmap_hva_range(struct kvm *kvm,
			unsigned long start, unsigned long end);
void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);

/* We do not have shadow page tables, hence the empty hooks */
static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
							 unsigned long address)
{
}

struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
void kvm_arm_halt_guest(struct kvm *kvm);
void kvm_arm_resume_guest(struct kvm *kvm);

u64 __kvm_call_hyp(void *hypfn, ...);
#define kvm_call_hyp(f, ...) __kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__)

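/*
 * Illustrative usage (not in the original header): kvm_call_hyp() resolves
 * the target symbol with kvm_ksym_ref() so the call works from the kernel VA
 * range and branches into the EL2 code. A typical caller looks roughly like
 *
 *	kvm_call_hyp(__kvm_tlb_flush_vmid, kvm);
 *
 * where __kvm_tlb_flush_vmid() is one of the hyp entry points declared in
 * asm/kvm_asm.h. This is a sketch of the calling convention, not a specific
 * call site.
 */
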
void force_vm_exit(const cpumask_t *mask);
void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);

int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
		int exception_index);

int kvm_perf_init(void);
int kvm_perf_teardown(void);

struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);

static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
				       unsigned long hyp_stack_ptr,
				       unsigned long vector_ptr)
{
	/*
	 * Call initialization code, and switch to the full-blown
	 * HYP code.
	 */
	__kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr);
}

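/*
 * Illustrative sketch (not in the original header): the per-CPU caller in
 * the shared ARM KVM init code is expected to hand this function the hyp
 * pgd, a per-CPU hyp stack top and the hyp exception vector, roughly:
 *
 *	pgd_ptr = kvm_mmu_get_httbr();
 *	hyp_stack_ptr = __this_cpu_read(kvm_arm_hyp_stack_page) + PAGE_SIZE;
 *	vector_ptr = (unsigned long)kvm_ksym_ref(__kvm_hyp_vector);
 *	__cpu_init_hyp_mode(pgd_ptr, hyp_stack_ptr, vector_ptr);
 *
 * The helper and variable names are assumptions taken from the generic ARM
 * KVM code; only the shape of the call matters here.
 */
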
static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}

void kvm_arm_init_debug(void);
void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu);
int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);

static inline void __cpu_init_stage2(void)
{
	u32 parange = kvm_call_hyp(__init_stage2_translation);

	WARN_ONCE(parange < 40,
		  "PARange is %d bits, unsupported configuration!", parange);
}

#endif /* __ARM64_KVM_HOST_H__ */