arch/arm64/include/asm/kvm_host.h
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/asm/kvm_host.h:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ARM64_KVM_HOST_H__
#define __ARM64_KVM_HOST_H__

#include <linux/types.h>
#include <linux/kvm_types.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmio.h>

#define __KVM_HAVE_ARCH_INTC_INITIALIZED

#define KVM_USER_MEM_SLOTS 32
#define KVM_PRIVATE_MEM_SLOTS 4
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
#define KVM_HALT_POLL_NS_DEFAULT 500000

#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>
#include <kvm/arm_pmu.h>

#define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS

#define KVM_VCPU_MAX_FEATURES 4

#define KVM_REQ_VCPU_EXIT 8

int __attribute_const__ kvm_target_cpu(void);
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
int kvm_arch_dev_ioctl_check_extension(struct kvm *kvm, long ext);
void __extended_idmap_trampoline(phys_addr_t boot_pgd, phys_addr_t idmap_start);

struct kvm_arch {
        /* The VMID generation used for the virt. memory system */
        u64 vmid_gen;
        u32 vmid;

        /* 1-level 2nd stage table and lock */
        spinlock_t pgd_lock;
        pgd_t *pgd;

        /* VTTBR value associated with above pgd and vmid */
        u64 vttbr;

        /* The last vcpu id that ran on each physical CPU */
        int __percpu *last_vcpu_ran;

        /* The maximum number of vCPUs depends on the used GIC model */
        int max_vcpus;

        /* Interrupt controller */
        struct vgic_dist vgic;

        /* Timer */
        struct arch_timer_kvm timer;
};

#define KVM_NR_MEM_OBJS 40

/*
 * We don't want allocation failures within the mmu code, so we preallocate
 * enough memory for a single page fault in a cache.
 */
struct kvm_mmu_memory_cache {
        int nobjs;
        void *objects[KVM_NR_MEM_OBJS];
};
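
/*
 * Usage sketch, not part of this header: callers are expected to top the
 * cache up with sleeping (GFP_KERNEL) allocations before entering a
 * spinlocked MMU path, then consume the preallocated objects while the
 * lock is held.  The helper names below are illustrative assumptions;
 * the real helpers live in the arch MMU code, not here.
 *
 *      static int example_mmu_topup(struct kvm_mmu_memory_cache *cache, int min)
 *      {
 *              while (cache->nobjs < min) {
 *                      void *page = (void *)__get_free_page(GFP_KERNEL);
 *
 *                      if (!page)
 *                              return -ENOMEM;
 *                      cache->objects[cache->nobjs++] = page;
 *              }
 *              return 0;
 *      }
 *
 *      static void *example_mmu_alloc(struct kvm_mmu_memory_cache *cache)
 *      {
 *              BUG_ON(!cache->nobjs);
 *              return cache->objects[--cache->nobjs];
 *      }
 */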

struct kvm_vcpu_fault_info {
        u32 esr_el2;            /* Hyp Syndrome Register */
        u64 far_el2;            /* Hyp Fault Address Register */
        u64 hpfar_el2;          /* Hyp IPA Fault Address Register */
};

/*
 * 0 is reserved as an invalid value.
 * Order should be kept in sync with the save/restore code.
 */
enum vcpu_sysreg {
        __INVALID_SYSREG__,
        MPIDR_EL1,      /* MultiProcessor Affinity Register */
        CSSELR_EL1,     /* Cache Size Selection Register */
        SCTLR_EL1,      /* System Control Register */
        ACTLR_EL1,      /* Auxiliary Control Register */
        CPACR_EL1,      /* Coprocessor Access Control */
        TTBR0_EL1,      /* Translation Table Base Register 0 */
        TTBR1_EL1,      /* Translation Table Base Register 1 */
        TCR_EL1,        /* Translation Control Register */
        ESR_EL1,        /* Exception Syndrome Register */
        AFSR0_EL1,      /* Auxiliary Fault Status Register 0 */
        AFSR1_EL1,      /* Auxiliary Fault Status Register 1 */
        FAR_EL1,        /* Fault Address Register */
        MAIR_EL1,       /* Memory Attribute Indirection Register */
        VBAR_EL1,       /* Vector Base Address Register */
        CONTEXTIDR_EL1, /* Context ID Register */
        TPIDR_EL0,      /* Thread ID, User R/W */
        TPIDRRO_EL0,    /* Thread ID, User R/O */
        TPIDR_EL1,      /* Thread ID, Privileged */
        AMAIR_EL1,      /* Aux Memory Attribute Indirection Register */
        CNTKCTL_EL1,    /* Timer Control Register (EL1) */
        PAR_EL1,        /* Physical Address Register */
        MDSCR_EL1,      /* Monitor Debug System Control Register */
        MDCCINT_EL1,    /* Monitor Debug Comms Channel Interrupt Enable Reg */

        /* Performance Monitors Registers */
        PMCR_EL0,       /* Control Register */
        PMSELR_EL0,     /* Event Counter Selection Register */
        PMEVCNTR0_EL0,  /* Event Counter Register (0-30) */
        PMEVCNTR30_EL0 = PMEVCNTR0_EL0 + 30,
        PMCCNTR_EL0,    /* Cycle Counter Register */
        PMEVTYPER0_EL0, /* Event Type Register (0-30) */
        PMEVTYPER30_EL0 = PMEVTYPER0_EL0 + 30,
        PMCCFILTR_EL0,  /* Cycle Count Filter Register */
        PMCNTENSET_EL0, /* Count Enable Set Register */
        PMINTENSET_EL1, /* Interrupt Enable Set Register */
        PMOVSSET_EL0,   /* Overflow Flag Status Set Register */
        PMSWINC_EL0,    /* Software Increment Register */
        PMUSERENR_EL0,  /* User Enable Register */

        /* 32bit specific registers. Keep them at the end of the range */
        DACR32_EL2,     /* Domain Access Control Register */
        IFSR32_EL2,     /* Instruction Fault Status Register */
        FPEXC32_EL2,    /* Floating-Point Exception Control Register */
        DBGVCR32_EL2,   /* Debug Vector Catch Register */

        NR_SYS_REGS     /* Nothing after this line! */
};

/* 32bit mapping */
#define c0_MPIDR        (MPIDR_EL1 * 2)         /* MultiProcessor ID Register */
#define c0_CSSELR       (CSSELR_EL1 * 2)        /* Cache Size Selection Register */
#define c1_SCTLR        (SCTLR_EL1 * 2)         /* System Control Register */
#define c1_ACTLR        (ACTLR_EL1 * 2)         /* Auxiliary Control Register */
#define c1_CPACR        (CPACR_EL1 * 2)         /* Coprocessor Access Control */
#define c2_TTBR0        (TTBR0_EL1 * 2)         /* Translation Table Base Register 0 */
#define c2_TTBR0_high   (c2_TTBR0 + 1)          /* TTBR0 top 32 bits */
#define c2_TTBR1        (TTBR1_EL1 * 2)         /* Translation Table Base Register 1 */
#define c2_TTBR1_high   (c2_TTBR1 + 1)          /* TTBR1 top 32 bits */
#define c2_TTBCR        (TCR_EL1 * 2)           /* Translation Table Base Control R. */
#define c3_DACR         (DACR32_EL2 * 2)        /* Domain Access Control Register */
#define c5_DFSR         (ESR_EL1 * 2)           /* Data Fault Status Register */
#define c5_IFSR         (IFSR32_EL2 * 2)        /* Instruction Fault Status Register */
#define c5_ADFSR        (AFSR0_EL1 * 2)         /* Auxiliary Data Fault Status R */
#define c5_AIFSR        (AFSR1_EL1 * 2)         /* Auxiliary Instr Fault Status R */
#define c6_DFAR         (FAR_EL1 * 2)           /* Data Fault Address Register */
#define c6_IFAR         (c6_DFAR + 1)           /* Instruction Fault Address Register */
#define c7_PAR          (PAR_EL1 * 2)           /* Physical Address Register */
#define c7_PAR_high     (c7_PAR + 1)            /* PAR top 32 bits */
#define c10_PRRR        (MAIR_EL1 * 2)          /* Primary Region Remap Register */
#define c10_NMRR        (c10_PRRR + 1)          /* Normal Memory Remap Register */
#define c12_VBAR        (VBAR_EL1 * 2)          /* Vector Base Address Register */
#define c13_CID         (CONTEXTIDR_EL1 * 2)    /* Context ID Register */
#define c13_TID_URW     (TPIDR_EL0 * 2)         /* Thread ID, User R/W */
#define c13_TID_URO     (TPIDRRO_EL0 * 2)       /* Thread ID, User R/O */
#define c13_TID_PRIV    (TPIDR_EL1 * 2)         /* Thread ID, Privileged */
#define c10_AMAIR0      (AMAIR_EL1 * 2)         /* Aux Memory Attr Indirection Reg */
#define c10_AMAIR1      (c10_AMAIR0 + 1)        /* Aux Memory Attr Indirection Reg */
#define c14_CNTKCTL     (CNTKCTL_EL1 * 2)       /* Timer Control Register (PL1) */

#define cp14_DBGDSCRext (MDSCR_EL1 * 2)
#define cp14_DBGBCR0    (DBGBCR0_EL1 * 2)
#define cp14_DBGBVR0    (DBGBVR0_EL1 * 2)
#define cp14_DBGBXVR0   (cp14_DBGBVR0 + 1)
#define cp14_DBGWCR0    (DBGWCR0_EL1 * 2)
#define cp14_DBGWVR0    (DBGWVR0_EL1 * 2)
#define cp14_DBGDCCINT  (MDCCINT_EL1 * 2)

#define NR_COPRO_REGS   (NR_SYS_REGS * 2)

struct kvm_cpu_context {
        struct kvm_regs gp_regs;
        union {
                u64 sys_regs[NR_SYS_REGS];
                u32 copro[NR_COPRO_REGS];
        };
};

typedef struct kvm_cpu_context kvm_cpu_context_t;

struct kvm_vcpu_arch {
        struct kvm_cpu_context ctxt;

        /* HYP configuration */
        u64 hcr_el2;
        u32 mdcr_el2;

        /* Exception Information */
        struct kvm_vcpu_fault_info fault;

        /* Guest debug state */
        u64 debug_flags;

        /*
         * We maintain more than a single set of debug registers to support
         * debugging the guest from the host and to maintain separate host and
         * guest state during world switches.  vcpu_debug_state are the debug
         * registers of the vcpu as the guest sees them.  host_debug_state are
         * the host registers which are saved and restored during
         * world switches.  external_debug_state contains the debug values we
         * want to use to debug the guest.  This is set via the
         * KVM_SET_GUEST_DEBUG ioctl.
         *
         * debug_ptr points to the set of debug registers that should be loaded
         * onto the hardware when running the guest.
         */
        struct kvm_guest_debug_arch *debug_ptr;
        struct kvm_guest_debug_arch vcpu_debug_state;
        struct kvm_guest_debug_arch external_debug_state;

        /* Pointer to host CPU context */
        kvm_cpu_context_t *host_cpu_context;
        struct kvm_guest_debug_arch host_debug_state;

        /* VGIC state */
        struct vgic_cpu vgic_cpu;
        struct arch_timer_cpu timer_cpu;
        struct kvm_pmu pmu;

        /*
         * Anything that is not used directly from assembly code goes
         * here.
         */

        /*
         * Guest registers we preserve during guest debugging.
         *
         * These shadow registers are updated by the kvm_handle_sys_reg
         * trap handler if the guest accesses or updates them while we
         * are using guest debug.
         */
        struct {
                u32 mdscr_el1;
        } guest_debug_preserved;

        /* vcpu power-off state */
        bool power_off;

        /* Don't run the guest (internal implementation need) */
        bool pause;

        /* IO related fields */
        struct kvm_decode mmio_decode;

        /* Interrupt related fields */
        u64 irq_lines;          /* IRQ and FIQ levels */

        /* Cache some mmu pages needed inside spinlock regions */
        struct kvm_mmu_memory_cache mmu_page_cache;

        /* Target CPU and feature flags */
        int target;
        DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);

        /* Detect first run of a vcpu */
        bool has_run_once;
};
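
/*
 * Illustrative sketch, not part of this header: debug_ptr normally points
 * at vcpu_debug_state and is redirected to external_debug_state while
 * userspace is debugging the guest via KVM_SET_GUEST_DEBUG.  The function
 * below is a hypothetical simplification; the real selection logic lives
 * in the arch debug code (kvm_arm_setup_debug()/kvm_arm_reset_debug_ptr()).
 *
 *      static void example_select_debug_regs(struct kvm_vcpu *vcpu,
 *                                            bool use_external_state)
 *      {
 *              if (use_external_state)
 *                      vcpu->arch.debug_ptr = &vcpu->arch.external_debug_state;
 *              else
 *                      vcpu->arch.debug_ptr = &vcpu->arch.vcpu_debug_state;
 *      }
 */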

#define vcpu_gp_regs(v)         (&(v)->arch.ctxt.gp_regs)
#define vcpu_sys_reg(v,r)       ((v)->arch.ctxt.sys_regs[(r)])
/*
 * CP14 and CP15 live in the same array, as they are backed by the
 * same system registers.
 */
#define vcpu_cp14(v,r)          ((v)->arch.ctxt.copro[(r)])
#define vcpu_cp15(v,r)          ((v)->arch.ctxt.copro[(r)])

#ifdef CONFIG_CPU_BIG_ENDIAN
#define vcpu_cp15_64_high(v,r)  vcpu_cp15((v),(r))
#define vcpu_cp15_64_low(v,r)   vcpu_cp15((v),(r) + 1)
#else
#define vcpu_cp15_64_high(v,r)  vcpu_cp15((v),(r) + 1)
#define vcpu_cp15_64_low(v,r)   vcpu_cp15((v),(r))
#endif
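
/*
 * Illustrative note, assuming the union layout above: sys_regs[] and
 * copro[] share the same storage, so each 64-bit system register overlays
 * a pair of 32-bit coprocessor slots, which is why the c*_ and cp14_*
 * mappings multiply the enum value by two.  For example, on a
 * little-endian host:
 *
 *      u64 ttbr0  = vcpu_sys_reg(vcpu, TTBR0_EL1);
 *      u32 low32  = vcpu_cp15(vcpu, c2_TTBR0);       (lower 32 bits of ttbr0)
 *      u32 high32 = vcpu_cp15(vcpu, c2_TTBR0_high);  (upper 32 bits of ttbr0)
 */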

struct kvm_vm_stat {
        ulong remote_tlb_flush;
};

struct kvm_vcpu_stat {
        u64 halt_successful_poll;
        u64 halt_attempted_poll;
        u64 halt_poll_invalid;
        u64 halt_wakeup;
        u64 hvc_exit_stat;
        u64 wfe_exit_stat;
        u64 wfi_exit_stat;
        u64 mmio_exit_user;
        u64 mmio_exit_kernel;
        u64 exits;
};

int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);

#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
int kvm_unmap_hva_range(struct kvm *kvm,
                        unsigned long start, unsigned long end);
void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);

/* We do not have shadow page tables, hence the empty hooks */
static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
                                                         unsigned long address)
{
}

struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
void kvm_arm_halt_guest(struct kvm *kvm);
void kvm_arm_resume_guest(struct kvm *kvm);
void kvm_arm_halt_vcpu(struct kvm_vcpu *vcpu);
void kvm_arm_resume_vcpu(struct kvm_vcpu *vcpu);

u64 __kvm_call_hyp(void *hypfn, ...);
#define kvm_call_hyp(f, ...) __kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__)
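
/*
 * Usage sketch, not part of this header: kvm_call_hyp() resolves the
 * function symbol to its HYP alias via kvm_ksym_ref() and, on non-VHE
 * systems, traps to EL2 so the callee runs in HYP context, e.g. with the
 * entry points declared in asm/kvm_asm.h:
 *
 *      kvm_call_hyp(__kvm_flush_vm_context);
 *      kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
 */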

void force_vm_exit(const cpumask_t *mask);
void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);

int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
                int exception_index);

int kvm_perf_init(void);
int kvm_perf_teardown(void);

struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);

static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
                                       unsigned long hyp_stack_ptr,
                                       unsigned long vector_ptr)
{
        /*
         * Call initialization code, and switch to the full-blown
         * HYP code.
         */
        __kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr);
}
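
/*
 * Usage sketch, not part of this header: the common per-cpu HYP init path
 * computes the physical address of the HYP page tables, a HYP stack
 * pointer and the HYP vectors before calling this helper.  The variables
 * below are illustrative assumptions modelled on that caller.
 *
 *      phys_addr_t pgd_ptr = kvm_mmu_get_httbr();
 *      unsigned long stack_page = __this_cpu_read(kvm_arm_hyp_stack_page);
 *      unsigned long hyp_stack_ptr = stack_page + PAGE_SIZE;
 *      unsigned long vector_ptr = (unsigned long)kvm_ksym_ref(__kvm_hyp_vector);
 *
 *      __cpu_init_hyp_mode(pgd_ptr, hyp_stack_ptr, vector_ptr);
 */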

void __kvm_hyp_teardown(void);
static inline void __cpu_reset_hyp_mode(unsigned long vector_ptr,
                                        phys_addr_t phys_idmap_start)
{
        kvm_call_hyp(__kvm_hyp_teardown, phys_idmap_start);
}

static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}

void kvm_arm_init_debug(void);
void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu);
int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
                               struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
                               struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
                               struct kvm_device_attr *attr);

static inline void __cpu_init_stage2(void)
{
        u32 parange = kvm_call_hyp(__init_stage2_translation);

        WARN_ONCE(parange < 40,
                  "PARange is %d bits, unsupported configuration!", parange);
}
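
/*
 * Background note (an assumption summarising the architecture, not taken
 * from this file): the value checked above is derived from
 * ID_AA64MMFR0_EL1.PARange, whose encodings 0-6 correspond to physical
 * address ranges of 32, 36, 40, 42, 44, 48 and 52 bits.  The stage-2
 * configuration used here assumes at least a 40-bit range, hence the
 * warning threshold.
 */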

#endif /* __ARM64_KVM_HOST_H__ */