/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#ifndef __MIPS_KVM_HOST_H__
#define __MIPS_KVM_HOST_H__

#include <linux/cpumask.h>
#include <linux/mutex.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/kvm.h>
#include <linux/kvm_types.h>
#include <linux/threads.h>
#include <linux/spinlock.h>

#include <asm/inst.h>
#include <asm/mipsregs.h>

/* MIPS KVM register ids */
#define MIPS_CP0_32(_R, _S)					\
	(KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U32 | (8 * (_R) + (_S)))

#define MIPS_CP0_64(_R, _S)					\
	(KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U64 | (8 * (_R) + (_S)))

#define KVM_REG_MIPS_CP0_INDEX		MIPS_CP0_32(0, 0)
#define KVM_REG_MIPS_CP0_ENTRYLO0	MIPS_CP0_64(2, 0)
#define KVM_REG_MIPS_CP0_ENTRYLO1	MIPS_CP0_64(3, 0)
#define KVM_REG_MIPS_CP0_CONTEXT	MIPS_CP0_64(4, 0)
#define KVM_REG_MIPS_CP0_CONTEXTCONFIG	MIPS_CP0_32(4, 1)
#define KVM_REG_MIPS_CP0_USERLOCAL	MIPS_CP0_64(4, 2)
#define KVM_REG_MIPS_CP0_XCONTEXTCONFIG	MIPS_CP0_64(4, 3)
#define KVM_REG_MIPS_CP0_PAGEMASK	MIPS_CP0_32(5, 0)
#define KVM_REG_MIPS_CP0_PAGEGRAIN	MIPS_CP0_32(5, 1)
#define KVM_REG_MIPS_CP0_SEGCTL0	MIPS_CP0_64(5, 2)
#define KVM_REG_MIPS_CP0_SEGCTL1	MIPS_CP0_64(5, 3)
#define KVM_REG_MIPS_CP0_SEGCTL2	MIPS_CP0_64(5, 4)
#define KVM_REG_MIPS_CP0_PWBASE		MIPS_CP0_64(5, 5)
#define KVM_REG_MIPS_CP0_PWFIELD	MIPS_CP0_64(5, 6)
#define KVM_REG_MIPS_CP0_PWSIZE		MIPS_CP0_64(5, 7)
#define KVM_REG_MIPS_CP0_WIRED		MIPS_CP0_32(6, 0)
#define KVM_REG_MIPS_CP0_PWCTL		MIPS_CP0_32(6, 6)
#define KVM_REG_MIPS_CP0_HWRENA		MIPS_CP0_32(7, 0)
#define KVM_REG_MIPS_CP0_BADVADDR	MIPS_CP0_64(8, 0)
#define KVM_REG_MIPS_CP0_BADINSTR	MIPS_CP0_32(8, 1)
#define KVM_REG_MIPS_CP0_BADINSTRP	MIPS_CP0_32(8, 2)
#define KVM_REG_MIPS_CP0_COUNT		MIPS_CP0_32(9, 0)
#define KVM_REG_MIPS_CP0_ENTRYHI	MIPS_CP0_64(10, 0)
#define KVM_REG_MIPS_CP0_COMPARE	MIPS_CP0_32(11, 0)
#define KVM_REG_MIPS_CP0_STATUS		MIPS_CP0_32(12, 0)
#define KVM_REG_MIPS_CP0_INTCTL		MIPS_CP0_32(12, 1)
#define KVM_REG_MIPS_CP0_CAUSE		MIPS_CP0_32(13, 0)
#define KVM_REG_MIPS_CP0_EPC		MIPS_CP0_64(14, 0)
#define KVM_REG_MIPS_CP0_PRID		MIPS_CP0_32(15, 0)
#define KVM_REG_MIPS_CP0_EBASE		MIPS_CP0_64(15, 1)
#define KVM_REG_MIPS_CP0_CONFIG		MIPS_CP0_32(16, 0)
#define KVM_REG_MIPS_CP0_CONFIG1	MIPS_CP0_32(16, 1)
#define KVM_REG_MIPS_CP0_CONFIG2	MIPS_CP0_32(16, 2)
#define KVM_REG_MIPS_CP0_CONFIG3	MIPS_CP0_32(16, 3)
#define KVM_REG_MIPS_CP0_CONFIG4	MIPS_CP0_32(16, 4)
#define KVM_REG_MIPS_CP0_CONFIG5	MIPS_CP0_32(16, 5)
#define KVM_REG_MIPS_CP0_CONFIG7	MIPS_CP0_32(16, 7)
#define KVM_REG_MIPS_CP0_MAARI		MIPS_CP0_64(17, 2)
#define KVM_REG_MIPS_CP0_XCONTEXT	MIPS_CP0_64(20, 0)
#define KVM_REG_MIPS_CP0_ERROREPC	MIPS_CP0_64(30, 0)
#define KVM_REG_MIPS_CP0_KSCRATCH1	MIPS_CP0_64(31, 2)
#define KVM_REG_MIPS_CP0_KSCRATCH2	MIPS_CP0_64(31, 3)
#define KVM_REG_MIPS_CP0_KSCRATCH3	MIPS_CP0_64(31, 4)
#define KVM_REG_MIPS_CP0_KSCRATCH4	MIPS_CP0_64(31, 5)
#define KVM_REG_MIPS_CP0_KSCRATCH5	MIPS_CP0_64(31, 6)
#define KVM_REG_MIPS_CP0_KSCRATCH6	MIPS_CP0_64(31, 7)
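
/*
 * Worked example (illustrative only): KVM_REG_MIPS_CP0_STATUS above is
 * MIPS_CP0_32(12, 0), i.e. KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U32 | (8 * 12 + 0),
 * so the low bits of a CP0 register id encode 8 * reg + sel, while the size
 * field distinguishes the 32-bit and 64-bit register views.
 */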


#define KVM_MAX_VCPUS		8
#define KVM_USER_MEM_SLOTS	8
/* memory slots that are not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS	0

#define KVM_HALT_POLL_NS_DEFAULT 500000

#ifdef CONFIG_KVM_MIPS_VZ
extern unsigned long GUESTID_MASK;
extern unsigned long GUESTID_FIRST_VERSION;
extern unsigned long GUESTID_VERSION_MASK;
#endif


/*
 * Special address that contains the comm page, used for reducing # of traps.
 * This needs to be within 32KB of 0x0 (so the zero register can be used), but
 * preferably not at 0x0 so that most kernel NULL pointer dereferences can be
 * caught.
 */
#define KVM_GUEST_COMMPAGE_ADDR		((PAGE_SIZE > 0x8000) ? 0 : \
					 (0x8000 - PAGE_SIZE))
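/*
 * Worked example (assuming a 4KB PAGE_SIZE): KVM_GUEST_COMMPAGE_ADDR is
 * 0x8000 - 0x1000 = 0x7000, which is reachable with a 16-bit signed offset
 * from the zero register while still leaving page 0 unmapped. With page
 * sizes above 32KB the address falls back to 0.
 */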

#define KVM_GUEST_KERNEL_MODE(vcpu)	((kvm_read_c0_guest_status(vcpu->arch.cop0) & (ST0_EXL | ST0_ERL)) || \
					((kvm_read_c0_guest_status(vcpu->arch.cop0) & KSU_USER) == 0))

#define KVM_GUEST_KUSEG			0x00000000UL
#define KVM_GUEST_KSEG0			0x40000000UL
#define KVM_GUEST_KSEG1			0x40000000UL
#define KVM_GUEST_KSEG23		0x60000000UL
#define KVM_GUEST_KSEGX(a)		((_ACAST32_(a)) & 0xe0000000)
#define KVM_GUEST_CPHYSADDR(a)		((_ACAST32_(a)) & 0x1fffffff)

#define KVM_GUEST_CKSEG0ADDR(a)		(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0)
#define KVM_GUEST_CKSEG1ADDR(a)		(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG1)
#define KVM_GUEST_CKSEG23ADDR(a)	(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG23)

/*
 * Map an address to a certain kernel segment
 */
#define KVM_GUEST_KSEG0ADDR(a)		(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0)
#define KVM_GUEST_KSEG1ADDR(a)		(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG1)
#define KVM_GUEST_KSEG23ADDR(a)		(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG23)
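
/*
 * Worked example (illustrative only): KVM_GUEST_CPHYSADDR(0x60001000) masks
 * off the segment bits to give 0x00001000, so KVM_GUEST_KSEG0ADDR(0x60001000)
 * is 0x40001000 and KVM_GUEST_KSEG23ADDR(0x60001000) is 0x60001000. Note that
 * the guest KSEG0 and KSEG1 bases are identical (0x40000000) in this layout.
 */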

#define KVM_INVALID_PAGE		0xdeadbeef
#define KVM_INVALID_ADDR		0xdeadbeef

/*
 * EVA has overlapping user & kernel address spaces, so user VAs may be >
 * PAGE_OFFSET. For this reason we can't use the default KVM_HVA_ERR_BAD of
 * PAGE_OFFSET.
 */

#define KVM_HVA_ERR_BAD			(-1UL)
#define KVM_HVA_ERR_RO_BAD		(-2UL)

static inline bool kvm_is_error_hva(unsigned long addr)
{
	return IS_ERR_VALUE(addr);
}

struct kvm_vm_stat {
	ulong remote_tlb_flush;
};

struct kvm_vcpu_stat {
	u64 wait_exits;
	u64 cache_exits;
	u64 signal_exits;
	u64 int_exits;
	u64 cop_unusable_exits;
	u64 tlbmod_exits;
	u64 tlbmiss_ld_exits;
	u64 tlbmiss_st_exits;
	u64 addrerr_st_exits;
	u64 addrerr_ld_exits;
	u64 syscall_exits;
	u64 resvd_inst_exits;
	u64 break_inst_exits;
	u64 trap_inst_exits;
	u64 msa_fpe_exits;
	u64 fpe_exits;
	u64 msa_disabled_exits;
	u64 flush_dcache_exits;
#ifdef CONFIG_KVM_MIPS_VZ
	u64 vz_gpsi_exits;
	u64 vz_gsfc_exits;
	u64 vz_hc_exits;
	u64 vz_grr_exits;
	u64 vz_gva_exits;
	u64 vz_ghfc_exits;
	u64 vz_gpa_exits;
	u64 vz_resvd_exits;
#endif
	u64 halt_successful_poll;
	u64 halt_attempted_poll;
	u64 halt_poll_invalid;
	u64 halt_wakeup;
};

struct kvm_arch_memory_slot {
};

struct kvm_arch {
	/* Guest physical mm */
	struct mm_struct gpa_mm;
	/* Mask of CPUs needing GPA ASID flush */
	cpumask_t asid_flush_mask;
};

#define N_MIPS_COPROC_REGS	32
#define N_MIPS_COPROC_SEL	8

struct mips_coproc {
	unsigned long reg[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
	unsigned long stat[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
#endif
};

/*
 * Coprocessor 0 register names
 */
#define MIPS_CP0_TLB_INDEX	0
#define MIPS_CP0_TLB_RANDOM	1
#define MIPS_CP0_TLB_LOW	2
#define MIPS_CP0_TLB_LO0	2
#define MIPS_CP0_TLB_LO1	3
#define MIPS_CP0_TLB_CONTEXT	4
#define MIPS_CP0_TLB_PG_MASK	5
#define MIPS_CP0_TLB_WIRED	6
#define MIPS_CP0_HWRENA		7
#define MIPS_CP0_BAD_VADDR	8
#define MIPS_CP0_COUNT		9
#define MIPS_CP0_TLB_HI		10
#define MIPS_CP0_COMPARE	11
#define MIPS_CP0_STATUS		12
#define MIPS_CP0_CAUSE		13
#define MIPS_CP0_EXC_PC		14
#define MIPS_CP0_PRID		15
#define MIPS_CP0_CONFIG		16
#define MIPS_CP0_LLADDR		17
#define MIPS_CP0_WATCH_LO	18
#define MIPS_CP0_WATCH_HI	19
#define MIPS_CP0_TLB_XCONTEXT	20
#define MIPS_CP0_ECC		26
#define MIPS_CP0_CACHE_ERR	27
#define MIPS_CP0_TAG_LO		28
#define MIPS_CP0_TAG_HI		29
#define MIPS_CP0_ERROR_PC	30
#define MIPS_CP0_DEBUG		23
#define MIPS_CP0_DEPC		24
#define MIPS_CP0_PERFCNT	25
#define MIPS_CP0_ERRCTL		26
#define MIPS_CP0_DATA_LO	28
#define MIPS_CP0_DATA_HI	29
#define MIPS_CP0_DESAVE		31

#define MIPS_CP0_CONFIG_SEL	0
#define MIPS_CP0_CONFIG1_SEL	1
#define MIPS_CP0_CONFIG2_SEL	2
#define MIPS_CP0_CONFIG3_SEL	3
#define MIPS_CP0_CONFIG4_SEL	4
#define MIPS_CP0_CONFIG5_SEL	5

#define MIPS_CP0_GUESTCTL2	10
#define MIPS_CP0_GUESTCTL2_SEL	5
#define MIPS_CP0_GTOFFSET	12
#define MIPS_CP0_GTOFFSET_SEL	7

/* Resume Flags */
#define RESUME_FLAG_DR		(1<<0)	/* Reload guest nonvolatile state? */
#define RESUME_FLAG_HOST	(1<<1)	/* Resume host? */

#define RESUME_GUEST		0
#define RESUME_GUEST_DR		RESUME_FLAG_DR
#define RESUME_HOST		RESUME_FLAG_HOST

enum emulation_result {
	EMULATE_DONE,		/* no further processing */
	EMULATE_DO_MMIO,	/* kvm_run filled with MMIO request */
	EMULATE_FAIL,		/* can't emulate this instruction */
	EMULATE_WAIT,		/* WAIT instruction */
	EMULATE_PRIV_FAIL,
	EMULATE_EXCEPT,		/* A guest exception has been generated */
	EMULATE_HYPERCALL,	/* HYPCALL instruction */
};

#define mips3_paddr_to_tlbpfn(x) \
	(((unsigned long)(x) >> MIPS3_PG_SHIFT) & MIPS3_PG_FRAME)
#define mips3_tlbpfn_to_paddr(x) \
	((unsigned long)((x) & MIPS3_PG_FRAME) << MIPS3_PG_SHIFT)

#define MIPS3_PG_SHIFT		6
#define MIPS3_PG_FRAME		0x3fffffc0
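
/*
 * Worked example (illustrative only): with MIPS3_PG_SHIFT == 6,
 * mips3_paddr_to_tlbpfn(0x12345000) == 0x0048d140 and
 * mips3_tlbpfn_to_paddr(0x0048d140) == 0x12345000, i.e. the macros convert
 * between a physical address and the PFN field of an EntryLo value.
 */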

#define VPN2_MASK		0xffffe000
#define KVM_ENTRYHI_ASID	MIPS_ENTRYHI_ASID
#define TLB_IS_GLOBAL(x)	((x).tlb_lo[0] & (x).tlb_lo[1] & ENTRYLO_G)
#define TLB_VPN2(x)		((x).tlb_hi & VPN2_MASK)
#define TLB_ASID(x)		((x).tlb_hi & KVM_ENTRYHI_ASID)
#define TLB_LO_IDX(x, va)	(((va) >> PAGE_SHIFT) & 1)
#define TLB_IS_VALID(x, va)	((x).tlb_lo[TLB_LO_IDX(x, va)] & ENTRYLO_V)
#define TLB_IS_DIRTY(x, va)	((x).tlb_lo[TLB_LO_IDX(x, va)] & ENTRYLO_D)
#define TLB_HI_VPN2_HIT(x, y)	((TLB_VPN2(x) & ~(x).tlb_mask) ==	\
				 ((y) & VPN2_MASK & ~(x).tlb_mask))
#define TLB_HI_ASID_HIT(x, y)	(TLB_IS_GLOBAL(x) ||			\
				 TLB_ASID(x) == ((y) & KVM_ENTRYHI_ASID))

struct kvm_mips_tlb {
	long tlb_mask;
	long tlb_hi;
	long tlb_lo[2];
};
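
/*
 * Illustrative sketch (not the in-tree lookup code): a software guest TLB
 * entry matches an access when the VPN2 bits agree (ignoring the page mask)
 * and either the entry is global or the ASIDs agree; the even/odd EntryLo
 * half is then picked by the virtual address:
 *
 *	struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[i];
 *
 *	if (TLB_HI_VPN2_HIT(*tlb, gva) && TLB_HI_ASID_HIT(*tlb, entryhi) &&
 *	    TLB_IS_VALID(*tlb, gva))
 *		lo = tlb->tlb_lo[TLB_LO_IDX(*tlb, gva)];
 */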

#define KVM_NR_MEM_OBJS     4

/*
 * We don't want allocation failures within the mmu code, so we preallocate
 * enough memory for a single page fault in a cache.
 */
struct kvm_mmu_memory_cache {
	int nobjs;
	void *objects[KVM_NR_MEM_OBJS];
};

#define KVM_MIPS_AUX_FPU	0x1
#define KVM_MIPS_AUX_MSA	0x2

#define KVM_MIPS_GUEST_TLB_SIZE	64
struct kvm_vcpu_arch {
	void *guest_ebase;
	int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);

	/* Host registers preserved across guest mode execution */
	unsigned long host_stack;
	unsigned long host_gp;
	unsigned long host_pgd;
	unsigned long host_entryhi;

	/* Host CP0 registers used when handling exits from guest */
	unsigned long host_cp0_badvaddr;
	unsigned long host_cp0_epc;
	u32 host_cp0_cause;
	u32 host_cp0_guestctl0;
	u32 host_cp0_badinstr;
	u32 host_cp0_badinstrp;

	/* GPRS */
	unsigned long gprs[32];
	unsigned long hi;
	unsigned long lo;
	unsigned long pc;

	/* FPU State */
	struct mips_fpu_struct fpu;
	/* Which auxiliary state is loaded (KVM_MIPS_AUX_*) */
	unsigned int aux_inuse;

	/* COP0 State */
	struct mips_coproc *cop0;

	/* Host KSEG0 address of the EI/DI offset */
	void *kseg0_commpage;

	/* Resume PC after MMIO completion */
	unsigned long io_pc;
	/* GPR used as IO source/target */
	u32 io_gpr;

	struct hrtimer comparecount_timer;
	/* Count timer control KVM register */
	u32 count_ctl;
	/* Count bias from the raw time */
	u32 count_bias;
	/* Frequency of timer in Hz */
	u32 count_hz;
	/* Dynamic nanosecond bias (multiple of count_period) to avoid overflow */
	s64 count_dyn_bias;
	/* Resume time */
	ktime_t count_resume;
	/* Period of timer tick in ns */
	u64 count_period;

	/* Bitmask of exceptions that are pending */
	unsigned long pending_exceptions;

	/* Bitmask of pending exceptions to be cleared */
	unsigned long pending_exceptions_clr;

	/* S/W Based TLB for guest */
	struct kvm_mips_tlb guest_tlb[KVM_MIPS_GUEST_TLB_SIZE];

	/* Guest kernel/user [partial] mm */
	struct mm_struct guest_kernel_mm, guest_user_mm;

	/* Guest ASID of last user mode execution */
	unsigned int last_user_gasid;

	/* Cache some mmu pages needed inside spinlock regions */
	struct kvm_mmu_memory_cache mmu_page_cache;

#ifdef CONFIG_KVM_MIPS_VZ
	/* vcpu's vzguestid is different on each host cpu in an smp system */
	u32 vzguestid[NR_CPUS];

	/* wired guest TLB entries */
	struct kvm_mips_tlb *wired_tlb;
	unsigned int wired_tlb_limit;
	unsigned int wired_tlb_used;

	/* emulated guest MAAR registers */
	unsigned long maar[6];
#endif

	/* Last CPU the VCPU state was loaded on */
	int last_sched_cpu;
	/* Last CPU the VCPU actually executed guest code on */
	int last_exec_cpu;

	/* WAIT executed */
	int wait;

	u8 fpu_enabled;
	u8 msa_enabled;
};

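/*
 * The helpers below modify saved guest CP0 state with an LL/SC retry loop,
 * so individual bit updates remain atomic even when the register can also be
 * written from another context (e.g. guest Cause, which may be updated from
 * the hrtimer callback in hardirq context; see the accessors further down).
 */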
static inline void _kvm_atomic_set_c0_guest_reg(unsigned long *reg,
						unsigned long val)
{
	unsigned long temp;
	do {
		__asm__ __volatile__(
		"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
		"	" __LL "%0, %1				\n"
		"	or	%0, %2				\n"
		"	" __SC	"%0, %1				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "+m" (*reg)
		: "r" (val));
	} while (unlikely(!temp));
}

static inline void _kvm_atomic_clear_c0_guest_reg(unsigned long *reg,
						  unsigned long val)
{
	unsigned long temp;
	do {
		__asm__ __volatile__(
		"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
		"	" __LL "%0, %1				\n"
		"	and	%0, %2				\n"
		"	" __SC	"%0, %1				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "+m" (*reg)
		: "r" (~val));
	} while (unlikely(!temp));
}

static inline void _kvm_atomic_change_c0_guest_reg(unsigned long *reg,
						   unsigned long change,
						   unsigned long val)
{
	unsigned long temp;
	do {
		__asm__ __volatile__(
		"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
		"	" __LL "%0, %1				\n"
		"	and	%0, %2				\n"
		"	or	%0, %3				\n"
		"	" __SC	"%0, %1				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "+m" (*reg)
		: "r" (~change), "r" (val & change));
	} while (unlikely(!temp));
}

/* Guest register types, used in accessor build below */
#define __KVMT32	u32
#define __KVMTl		unsigned long

/*
 * __BUILD_KVM_$ops_SAVED(): kvm_$op_sw_gc0_$reg()
 * These operate on the saved guest C0 state in RAM.
 */

/* Generate saved context simple accessors */
#define __BUILD_KVM_RW_SAVED(name, type, _reg, sel)			\
static inline __KVMT##type kvm_read_sw_gc0_##name(struct mips_coproc *cop0) \
{									\
	return cop0->reg[(_reg)][(sel)];				\
}									\
static inline void kvm_write_sw_gc0_##name(struct mips_coproc *cop0,	\
					   __KVMT##type val)		\
{									\
	cop0->reg[(_reg)][(sel)] = val;					\
}

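/*
 * For example (illustrative only), __BUILD_KVM_RW_SAVED(index, 32,
 * MIPS_CP0_TLB_INDEX, 0) generates kvm_read_sw_gc0_index() and
 * kvm_write_sw_gc0_index(), which read and write the u32 value saved in
 * cop0->reg[MIPS_CP0_TLB_INDEX][0].
 */
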
/* Generate saved context bitwise modifiers */
#define __BUILD_KVM_SET_SAVED(name, type, _reg, sel)			\
static inline void kvm_set_sw_gc0_##name(struct mips_coproc *cop0,	\
					 __KVMT##type val)		\
{									\
	cop0->reg[(_reg)][(sel)] |= val;				\
}									\
static inline void kvm_clear_sw_gc0_##name(struct mips_coproc *cop0,	\
					   __KVMT##type val)		\
{									\
	cop0->reg[(_reg)][(sel)] &= ~val;				\
}									\
static inline void kvm_change_sw_gc0_##name(struct mips_coproc *cop0,	\
					    __KVMT##type mask,		\
					    __KVMT##type val)		\
{									\
	unsigned long _mask = mask;					\
	cop0->reg[(_reg)][(sel)] &= ~_mask;				\
	cop0->reg[(_reg)][(sel)] |= val & _mask;			\
}

/* Generate saved context atomic bitwise modifiers */
#define __BUILD_KVM_ATOMIC_SAVED(name, type, _reg, sel)			\
static inline void kvm_set_sw_gc0_##name(struct mips_coproc *cop0,	\
					 __KVMT##type val)		\
{									\
	_kvm_atomic_set_c0_guest_reg(&cop0->reg[(_reg)][(sel)], val);	\
}									\
static inline void kvm_clear_sw_gc0_##name(struct mips_coproc *cop0,	\
					   __KVMT##type val)		\
{									\
	_kvm_atomic_clear_c0_guest_reg(&cop0->reg[(_reg)][(sel)], val);	\
}									\
static inline void kvm_change_sw_gc0_##name(struct mips_coproc *cop0,	\
					    __KVMT##type mask,		\
					    __KVMT##type val)		\
{									\
	_kvm_atomic_change_c0_guest_reg(&cop0->reg[(_reg)][(sel)], mask, \
					val);				\
}

/*
 * __BUILD_KVM_$ops_VZ(): kvm_$op_vz_gc0_$reg()
 * These operate on the VZ guest C0 context in hardware.
 */

/* Generate VZ guest context simple accessors */
#define __BUILD_KVM_RW_VZ(name, type, _reg, sel)			\
static inline __KVMT##type kvm_read_vz_gc0_##name(struct mips_coproc *cop0) \
{									\
	return read_gc0_##name();					\
}									\
static inline void kvm_write_vz_gc0_##name(struct mips_coproc *cop0,	\
					   __KVMT##type val)		\
{									\
	write_gc0_##name(val);						\
}

/* Generate VZ guest context bitwise modifiers */
#define __BUILD_KVM_SET_VZ(name, type, _reg, sel)			\
static inline void kvm_set_vz_gc0_##name(struct mips_coproc *cop0,	\
					 __KVMT##type val)		\
{									\
	set_gc0_##name(val);						\
}									\
static inline void kvm_clear_vz_gc0_##name(struct mips_coproc *cop0,	\
					   __KVMT##type val)		\
{									\
	clear_gc0_##name(val);						\
}									\
static inline void kvm_change_vz_gc0_##name(struct mips_coproc *cop0,	\
					    __KVMT##type mask,		\
					    __KVMT##type val)		\
{									\
	change_gc0_##name(mask, val);					\
}

/* Generate VZ guest context save/restore to/from saved context */
#define __BUILD_KVM_SAVE_VZ(name, _reg, sel)				\
static inline void kvm_restore_gc0_##name(struct mips_coproc *cop0)	\
{									\
	write_gc0_##name(cop0->reg[(_reg)][(sel)]);			\
}									\
static inline void kvm_save_gc0_##name(struct mips_coproc *cop0)	\
{									\
	cop0->reg[(_reg)][(sel)] = read_gc0_##name();			\
}

/*
 * __BUILD_KVM_$ops_WRAP(): kvm_$op_$name1() -> kvm_$op_$name2()
 * These wrap a set of operations to provide them with a different name.
 */

/* Generate simple accessor wrapper */
#define __BUILD_KVM_RW_WRAP(name1, name2, type)				\
static inline __KVMT##type kvm_read_##name1(struct mips_coproc *cop0)	\
{									\
	return kvm_read_##name2(cop0);					\
}									\
static inline void kvm_write_##name1(struct mips_coproc *cop0,		\
				     __KVMT##type val)			\
{									\
	kvm_write_##name2(cop0, val);					\
}

/* Generate bitwise modifier wrapper */
#define __BUILD_KVM_SET_WRAP(name1, name2, type)			\
static inline void kvm_set_##name1(struct mips_coproc *cop0,		\
				   __KVMT##type val)			\
{									\
	kvm_set_##name2(cop0, val);					\
}									\
static inline void kvm_clear_##name1(struct mips_coproc *cop0,		\
				     __KVMT##type val)			\
{									\
	kvm_clear_##name2(cop0, val);					\
}									\
static inline void kvm_change_##name1(struct mips_coproc *cop0,		\
				      __KVMT##type mask,		\
				      __KVMT##type val)			\
{									\
	kvm_change_##name2(cop0, mask, val);				\
}

/*
 * __BUILD_KVM_$ops_SW(): kvm_$op_c0_guest_$reg() -> kvm_$op_sw_gc0_$reg()
 * These generate accessors operating on the saved context in RAM, and wrap them
 * with the common guest C0 accessors (for use by common emulation code).
 */

#define __BUILD_KVM_RW_SW(name, type, _reg, sel)			\
	__BUILD_KVM_RW_SAVED(name, type, _reg, sel)			\
	__BUILD_KVM_RW_WRAP(c0_guest_##name, sw_gc0_##name, type)

#define __BUILD_KVM_SET_SW(name, type, _reg, sel)			\
	__BUILD_KVM_SET_SAVED(name, type, _reg, sel)			\
	__BUILD_KVM_SET_WRAP(c0_guest_##name, sw_gc0_##name, type)

#define __BUILD_KVM_ATOMIC_SW(name, type, _reg, sel)			\
	__BUILD_KVM_ATOMIC_SAVED(name, type, _reg, sel)			\
	__BUILD_KVM_SET_WRAP(c0_guest_##name, sw_gc0_##name, type)

#ifndef CONFIG_KVM_MIPS_VZ

/*
 * T&E (trap & emulate software based virtualisation)
 * We generate the common accessors operating exclusively on the saved context
 * in RAM.
 */

#define __BUILD_KVM_RW_HW	__BUILD_KVM_RW_SW
#define __BUILD_KVM_SET_HW	__BUILD_KVM_SET_SW
#define __BUILD_KVM_ATOMIC_HW	__BUILD_KVM_ATOMIC_SW

#else

/*
 * VZ (hardware assisted virtualisation)
 * These macros use the active guest state in VZ mode (hardware registers).
 */

/*
 * __BUILD_KVM_$ops_HW(): kvm_$op_c0_guest_$reg() -> kvm_$op_vz_gc0_$reg()
 * These generate accessors operating on the VZ guest context in hardware, and
 * wrap them with the common guest C0 accessors (for use by common emulation
 * code).
 *
 * Accessors operating on the saved context in RAM are also generated to allow
 * convenient explicit saving and restoring of the state.
 */

#define __BUILD_KVM_RW_HW(name, type, _reg, sel)			\
	__BUILD_KVM_RW_SAVED(name, type, _reg, sel)			\
	__BUILD_KVM_RW_VZ(name, type, _reg, sel)			\
	__BUILD_KVM_RW_WRAP(c0_guest_##name, vz_gc0_##name, type)	\
	__BUILD_KVM_SAVE_VZ(name, _reg, sel)

#define __BUILD_KVM_SET_HW(name, type, _reg, sel)			\
	__BUILD_KVM_SET_SAVED(name, type, _reg, sel)			\
	__BUILD_KVM_SET_VZ(name, type, _reg, sel)			\
	__BUILD_KVM_SET_WRAP(c0_guest_##name, vz_gc0_##name, type)

/*
 * We can't do atomic modifications of COP0 state if hardware can modify it.
 * Races must be handled explicitly.
 */
#define __BUILD_KVM_ATOMIC_HW	__BUILD_KVM_SET_HW

#endif

/*
 * Define accessors for CP0 registers that are accessible to the guest. These
 * are primarily used by common emulation code, which may need to access the
 * registers differently depending on the implementation.
 *
 *    fns_hw/sw      name            type  reg num               select
 */
__BUILD_KVM_RW_HW(index,          32, MIPS_CP0_TLB_INDEX,    0)
__BUILD_KVM_RW_HW(entrylo0,       l,  MIPS_CP0_TLB_LO0,      0)
__BUILD_KVM_RW_HW(entrylo1,       l,  MIPS_CP0_TLB_LO1,      0)
__BUILD_KVM_RW_HW(context,        l,  MIPS_CP0_TLB_CONTEXT,  0)
__BUILD_KVM_RW_HW(contextconfig,  32, MIPS_CP0_TLB_CONTEXT,  1)
__BUILD_KVM_RW_HW(userlocal,      l,  MIPS_CP0_TLB_CONTEXT,  2)
__BUILD_KVM_RW_HW(xcontextconfig, l,  MIPS_CP0_TLB_CONTEXT,  3)
__BUILD_KVM_RW_HW(pagemask,       l,  MIPS_CP0_TLB_PG_MASK,  0)
__BUILD_KVM_RW_HW(pagegrain,      32, MIPS_CP0_TLB_PG_MASK,  1)
__BUILD_KVM_RW_HW(segctl0,        l,  MIPS_CP0_TLB_PG_MASK,  2)
__BUILD_KVM_RW_HW(segctl1,        l,  MIPS_CP0_TLB_PG_MASK,  3)
__BUILD_KVM_RW_HW(segctl2,        l,  MIPS_CP0_TLB_PG_MASK,  4)
__BUILD_KVM_RW_HW(pwbase,         l,  MIPS_CP0_TLB_PG_MASK,  5)
__BUILD_KVM_RW_HW(pwfield,        l,  MIPS_CP0_TLB_PG_MASK,  6)
__BUILD_KVM_RW_HW(pwsize,         l,  MIPS_CP0_TLB_PG_MASK,  7)
__BUILD_KVM_RW_HW(wired,          32, MIPS_CP0_TLB_WIRED,    0)
__BUILD_KVM_RW_HW(pwctl,          32, MIPS_CP0_TLB_WIRED,    6)
__BUILD_KVM_RW_HW(hwrena,         32, MIPS_CP0_HWRENA,       0)
__BUILD_KVM_RW_HW(badvaddr,       l,  MIPS_CP0_BAD_VADDR,    0)
__BUILD_KVM_RW_HW(badinstr,       32, MIPS_CP0_BAD_VADDR,    1)
__BUILD_KVM_RW_HW(badinstrp,      32, MIPS_CP0_BAD_VADDR,    2)
__BUILD_KVM_RW_SW(count,          32, MIPS_CP0_COUNT,        0)
__BUILD_KVM_RW_HW(entryhi,        l,  MIPS_CP0_TLB_HI,       0)
__BUILD_KVM_RW_HW(compare,        32, MIPS_CP0_COMPARE,      0)
__BUILD_KVM_RW_HW(status,         32, MIPS_CP0_STATUS,       0)
__BUILD_KVM_RW_HW(intctl,         32, MIPS_CP0_STATUS,       1)
__BUILD_KVM_RW_HW(cause,          32, MIPS_CP0_CAUSE,        0)
__BUILD_KVM_RW_HW(epc,            l,  MIPS_CP0_EXC_PC,       0)
__BUILD_KVM_RW_SW(prid,           32, MIPS_CP0_PRID,         0)
__BUILD_KVM_RW_HW(ebase,          l,  MIPS_CP0_PRID,         1)
__BUILD_KVM_RW_HW(config,         32, MIPS_CP0_CONFIG,       0)
__BUILD_KVM_RW_HW(config1,        32, MIPS_CP0_CONFIG,       1)
__BUILD_KVM_RW_HW(config2,        32, MIPS_CP0_CONFIG,       2)
__BUILD_KVM_RW_HW(config3,        32, MIPS_CP0_CONFIG,       3)
__BUILD_KVM_RW_HW(config4,        32, MIPS_CP0_CONFIG,       4)
__BUILD_KVM_RW_HW(config5,        32, MIPS_CP0_CONFIG,       5)
__BUILD_KVM_RW_HW(config6,        32, MIPS_CP0_CONFIG,       6)
__BUILD_KVM_RW_HW(config7,        32, MIPS_CP0_CONFIG,       7)
__BUILD_KVM_RW_SW(maari,          l,  MIPS_CP0_LLADDR,       2)
__BUILD_KVM_RW_HW(xcontext,       l,  MIPS_CP0_TLB_XCONTEXT, 0)
__BUILD_KVM_RW_HW(errorepc,       l,  MIPS_CP0_ERROR_PC,     0)
__BUILD_KVM_RW_HW(kscratch1,      l,  MIPS_CP0_DESAVE,       2)
__BUILD_KVM_RW_HW(kscratch2,      l,  MIPS_CP0_DESAVE,       3)
__BUILD_KVM_RW_HW(kscratch3,      l,  MIPS_CP0_DESAVE,       4)
__BUILD_KVM_RW_HW(kscratch4,      l,  MIPS_CP0_DESAVE,       5)
__BUILD_KVM_RW_HW(kscratch5,      l,  MIPS_CP0_DESAVE,       6)
__BUILD_KVM_RW_HW(kscratch6,      l,  MIPS_CP0_DESAVE,       7)

/* Bitwise operations (on HW state) */
__BUILD_KVM_SET_HW(status,        32, MIPS_CP0_STATUS,       0)
/* Cause can be modified asynchronously from hardirq hrtimer callback */
__BUILD_KVM_ATOMIC_HW(cause,      32, MIPS_CP0_CAUSE,        0)
__BUILD_KVM_SET_HW(ebase,         l,  MIPS_CP0_PRID,         1)

/* Bitwise operations (on saved state) */
__BUILD_KVM_SET_SAVED(config,     32, MIPS_CP0_CONFIG,       0)
__BUILD_KVM_SET_SAVED(config1,    32, MIPS_CP0_CONFIG,       1)
__BUILD_KVM_SET_SAVED(config2,    32, MIPS_CP0_CONFIG,       2)
__BUILD_KVM_SET_SAVED(config3,    32, MIPS_CP0_CONFIG,       3)
__BUILD_KVM_SET_SAVED(config4,    32, MIPS_CP0_CONFIG,       4)
__BUILD_KVM_SET_SAVED(config5,    32, MIPS_CP0_CONFIG,       5)

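/*
 * Example usage by common emulation code (a sketch, not taken from a
 * particular call site): the generated accessors hide whether the guest
 * Status register lives in the saved context in RAM (T&E) or in the VZ
 * guest hardware context:
 *
 *	u32 status = kvm_read_c0_guest_status(vcpu->arch.cop0);
 *
 *	if (!(status & ST0_CU1))
 *		kvm_set_c0_guest_status(vcpu->arch.cop0, ST0_CU1);
 */
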
/* Helpers */

static inline bool kvm_mips_guest_can_have_fpu(struct kvm_vcpu_arch *vcpu)
{
	return (!__builtin_constant_p(raw_cpu_has_fpu) || raw_cpu_has_fpu) &&
		vcpu->fpu_enabled;
}

static inline bool kvm_mips_guest_has_fpu(struct kvm_vcpu_arch *vcpu)
{
	return kvm_mips_guest_can_have_fpu(vcpu) &&
		kvm_read_c0_guest_config1(vcpu->cop0) & MIPS_CONF1_FP;
}

static inline bool kvm_mips_guest_can_have_msa(struct kvm_vcpu_arch *vcpu)
{
	return (!__builtin_constant_p(cpu_has_msa) || cpu_has_msa) &&
		vcpu->msa_enabled;
}

static inline bool kvm_mips_guest_has_msa(struct kvm_vcpu_arch *vcpu)
{
	return kvm_mips_guest_can_have_msa(vcpu) &&
		kvm_read_c0_guest_config3(vcpu->cop0) & MIPS_CONF3_MSA;
}

struct kvm_mips_callbacks {
	int (*handle_cop_unusable)(struct kvm_vcpu *vcpu);
	int (*handle_tlb_mod)(struct kvm_vcpu *vcpu);
	int (*handle_tlb_ld_miss)(struct kvm_vcpu *vcpu);
	int (*handle_tlb_st_miss)(struct kvm_vcpu *vcpu);
	int (*handle_addr_err_st)(struct kvm_vcpu *vcpu);
	int (*handle_addr_err_ld)(struct kvm_vcpu *vcpu);
	int (*handle_syscall)(struct kvm_vcpu *vcpu);
	int (*handle_res_inst)(struct kvm_vcpu *vcpu);
	int (*handle_break)(struct kvm_vcpu *vcpu);
	int (*handle_trap)(struct kvm_vcpu *vcpu);
	int (*handle_msa_fpe)(struct kvm_vcpu *vcpu);
	int (*handle_fpe)(struct kvm_vcpu *vcpu);
	int (*handle_msa_disabled)(struct kvm_vcpu *vcpu);
	int (*handle_guest_exit)(struct kvm_vcpu *vcpu);
	int (*hardware_enable)(void);
	void (*hardware_disable)(void);
	int (*check_extension)(struct kvm *kvm, long ext);
	int (*vcpu_init)(struct kvm_vcpu *vcpu);
	void (*vcpu_uninit)(struct kvm_vcpu *vcpu);
	int (*vcpu_setup)(struct kvm_vcpu *vcpu);
	void (*flush_shadow_all)(struct kvm *kvm);
	/*
	 * Must take care of flushing any cached GPA PTEs (e.g. guest entries in
	 * VZ root TLB, or T&E GVA page tables and corresponding root TLB
	 * mappings).
	 */
	void (*flush_shadow_memslot)(struct kvm *kvm,
				     const struct kvm_memory_slot *slot);
	gpa_t (*gva_to_gpa)(gva_t gva);
	void (*queue_timer_int)(struct kvm_vcpu *vcpu);
	void (*dequeue_timer_int)(struct kvm_vcpu *vcpu);
	void (*queue_io_int)(struct kvm_vcpu *vcpu,
			     struct kvm_mips_interrupt *irq);
	void (*dequeue_io_int)(struct kvm_vcpu *vcpu,
			       struct kvm_mips_interrupt *irq);
	int (*irq_deliver)(struct kvm_vcpu *vcpu, unsigned int priority,
			   u32 cause);
	int (*irq_clear)(struct kvm_vcpu *vcpu, unsigned int priority,
			 u32 cause);
	unsigned long (*num_regs)(struct kvm_vcpu *vcpu);
	int (*copy_reg_indices)(struct kvm_vcpu *vcpu, u64 __user *indices);
	int (*get_one_reg)(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg, s64 *v);
	int (*set_one_reg)(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg, s64 v);
	int (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	int (*vcpu_put)(struct kvm_vcpu *vcpu, int cpu);
	int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
	void (*vcpu_reenter)(struct kvm_run *run, struct kvm_vcpu *vcpu);
};
extern struct kvm_mips_callbacks *kvm_mips_callbacks;
int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks);

/* Debug: dump vcpu state */
int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);

extern int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu);

/* Building of entry/exception code */
int kvm_mips_entry_setup(void);
void *kvm_mips_build_vcpu_run(void *addr);
void *kvm_mips_build_tlb_refill_exception(void *addr, void *handler);
void *kvm_mips_build_exception(void *addr, void *handler);
void *kvm_mips_build_exit(void *addr);

/* FPU/MSA context management */
void __kvm_save_fpu(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_fpu(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_fcsr(struct kvm_vcpu_arch *vcpu);
void __kvm_save_msa(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_msa(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_msa_upper(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_msacsr(struct kvm_vcpu_arch *vcpu);
void kvm_own_fpu(struct kvm_vcpu *vcpu);
void kvm_own_msa(struct kvm_vcpu *vcpu);
void kvm_drop_fpu(struct kvm_vcpu *vcpu);
void kvm_lose_fpu(struct kvm_vcpu *vcpu);

/* TLB handling */
u32 kvm_get_kernel_asid(struct kvm_vcpu *vcpu);

u32 kvm_get_user_asid(struct kvm_vcpu *vcpu);

u32 kvm_get_commpage_asid(struct kvm_vcpu *vcpu);

#ifdef CONFIG_KVM_MIPS_VZ
int kvm_mips_handle_vz_root_tlb_fault(unsigned long badvaddr,
				      struct kvm_vcpu *vcpu, bool write_fault);
#endif
extern int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
					   struct kvm_vcpu *vcpu,
					   bool write_fault);

extern int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
					      struct kvm_vcpu *vcpu);

extern int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
						struct kvm_mips_tlb *tlb,
						unsigned long gva,
						bool write_fault);

extern enum emulation_result kvm_mips_handle_tlbmiss(u32 cause,
						     u32 *opc,
						     struct kvm_run *run,
						     struct kvm_vcpu *vcpu,
						     bool write_fault);

extern void kvm_mips_dump_host_tlbs(void);
extern void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu);
extern int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long entryhi,
				 bool user, bool kernel);

extern int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu,
				     unsigned long entryhi);

#ifdef CONFIG_KVM_MIPS_VZ
int kvm_vz_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long entryhi);
int kvm_vz_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long gva,
			    unsigned long *gpa);
void kvm_vz_local_flush_roottlb_all_guests(void);
void kvm_vz_local_flush_guesttlb_all(void);
void kvm_vz_save_guesttlb(struct kvm_mips_tlb *buf, unsigned int index,
			  unsigned int count);
void kvm_vz_load_guesttlb(const struct kvm_mips_tlb *buf, unsigned int index,
			  unsigned int count);
#endif

void kvm_mips_suspend_mm(int cpu);
void kvm_mips_resume_mm(int cpu);

/* MMU handling */

/**
 * enum kvm_mips_flush - Types of MMU flushes.
 * @KMF_USER:	Flush guest user virtual memory mappings.
 *		Guest USeg only.
 * @KMF_KERN:	Flush guest kernel virtual memory mappings.
 *		Guest USeg and KSeg2/3.
 * @KMF_GPA:	Flush guest physical memory mappings.
 *		Also includes KSeg0 if KMF_KERN is set.
 */
enum kvm_mips_flush {
	KMF_USER	= 0x0,
	KMF_KERN	= 0x1,
	KMF_GPA		= 0x2,
};
void kvm_mips_flush_gva_pt(pgd_t *pgd, enum kvm_mips_flush flags);
bool kvm_mips_flush_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn);
int kvm_mips_mkclean_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn);
pgd_t *kvm_pgd_alloc(void);
void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
void kvm_trap_emul_invalidate_gva(struct kvm_vcpu *vcpu, unsigned long addr,
				  bool user);
void kvm_trap_emul_gva_lockless_begin(struct kvm_vcpu *vcpu);
void kvm_trap_emul_gva_lockless_end(struct kvm_vcpu *vcpu);

enum kvm_mips_fault_result {
	KVM_MIPS_MAPPED = 0,
	KVM_MIPS_GVA,
	KVM_MIPS_GPA,
	KVM_MIPS_TLB,
	KVM_MIPS_TLBINV,
	KVM_MIPS_TLBMOD,
};
enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu,
						   unsigned long gva,
						   bool write);

#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
int kvm_unmap_hva_range(struct kvm *kvm,
			unsigned long start, unsigned long end);
void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);

/* Emulation */
int kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu, u32 *out);
enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause);
int kvm_get_badinstr(u32 *opc, struct kvm_vcpu *vcpu, u32 *out);
int kvm_get_badinstrp(u32 *opc, struct kvm_vcpu *vcpu, u32 *out);

/**
 * kvm_is_ifetch_fault() - Find whether a TLBL exception is due to ifetch fault.
 * @vcpu:	Virtual CPU.
 *
 * Returns:	Whether the TLBL exception was likely due to an instruction
 *		fetch fault rather than a data load fault.
 */
static inline bool kvm_is_ifetch_fault(struct kvm_vcpu_arch *vcpu)
{
	unsigned long badvaddr = vcpu->host_cp0_badvaddr;
	unsigned long epc = msk_isa16_mode(vcpu->pc);
	u32 cause = vcpu->host_cp0_cause;

	if (epc == badvaddr)
		return true;

	/*
	 * Branches may be 32-bit or 16-bit instructions.
	 * This isn't exact, but we don't really support MIPS16 or microMIPS yet
	 * in KVM anyway.
	 */
	if ((cause & CAUSEF_BD) && badvaddr - epc <= 4)
		return true;

	return false;
}

extern enum emulation_result kvm_mips_emulate_inst(u32 cause,
						   u32 *opc,
						   struct kvm_run *run,
						   struct kvm_vcpu *vcpu);

long kvm_mips_guest_exception_base(struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_syscall(u32 cause,
						      u32 *opc,
						      struct kvm_run *run,
						      struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_tlbmiss_ld(u32 cause,
							  u32 *opc,
							  struct kvm_run *run,
							  struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_tlbinv_ld(u32 cause,
							 u32 *opc,
							 struct kvm_run *run,
							 struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_tlbmiss_st(u32 cause,
							  u32 *opc,
							  struct kvm_run *run,
							  struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_tlbinv_st(u32 cause,
							 u32 *opc,
							 struct kvm_run *run,
							 struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_tlbmod(u32 cause,
						      u32 *opc,
						      struct kvm_run *run,
						      struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_fpu_exc(u32 cause,
						       u32 *opc,
						       struct kvm_run *run,
						       struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_handle_ri(u32 cause,
						 u32 *opc,
						 struct kvm_run *run,
						 struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_ri_exc(u32 cause,
						      u32 *opc,
						      struct kvm_run *run,
						      struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_bp_exc(u32 cause,
						      u32 *opc,
						      struct kvm_run *run,
						      struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_trap_exc(u32 cause,
							u32 *opc,
							struct kvm_run *run,
							struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_msafpe_exc(u32 cause,
							  u32 *opc,
							  struct kvm_run *run,
							  struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_fpe_exc(u32 cause,
						       u32 *opc,
						       struct kvm_run *run,
						       struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_msadis_exc(u32 cause,
							  u32 *opc,
							  struct kvm_run *run,
							  struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
							  struct kvm_run *run);

u32 kvm_mips_read_count(struct kvm_vcpu *vcpu);
void kvm_mips_write_count(struct kvm_vcpu *vcpu, u32 count);
void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack);
void kvm_mips_init_count(struct kvm_vcpu *vcpu, unsigned long count_hz);
int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl);
int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume);
int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz);
void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu);
void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu);
enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu);

/* fairly internal functions requiring some care to use */
int kvm_mips_count_disabled(struct kvm_vcpu *vcpu);
ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu, u32 *count);
int kvm_mips_restore_hrtimer(struct kvm_vcpu *vcpu, ktime_t before,
			     u32 count, int min_drift);

#ifdef CONFIG_KVM_MIPS_VZ
void kvm_vz_acquire_htimer(struct kvm_vcpu *vcpu);
void kvm_vz_lose_htimer(struct kvm_vcpu *vcpu);
#else
static inline void kvm_vz_acquire_htimer(struct kvm_vcpu *vcpu) {}
static inline void kvm_vz_lose_htimer(struct kvm_vcpu *vcpu) {}
#endif

enum emulation_result kvm_mips_check_privilege(u32 cause,
					       u32 *opc,
					       struct kvm_run *run,
					       struct kvm_vcpu *vcpu);

enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst,
					     u32 *opc,
					     u32 cause,
					     struct kvm_run *run,
					     struct kvm_vcpu *vcpu);
enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
					   u32 *opc,
					   u32 cause,
					   struct kvm_run *run,
					   struct kvm_vcpu *vcpu);
enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
					     u32 cause,
					     struct kvm_run *run,
					     struct kvm_vcpu *vcpu);
enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
					    u32 cause,
					    struct kvm_run *run,
					    struct kvm_vcpu *vcpu);

/* COP0 */
enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu);

unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu);
unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu);
unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu);
unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu);

/* Hypercalls (hypcall.c) */

enum emulation_result kvm_mips_emul_hypcall(struct kvm_vcpu *vcpu,
					    union mips_instruction inst);
int kvm_mips_handle_hypcall(struct kvm_vcpu *vcpu);

/* Dynamic binary translation */
extern int kvm_mips_trans_cache_index(union mips_instruction inst,
				      u32 *opc, struct kvm_vcpu *vcpu);
extern int kvm_mips_trans_cache_va(union mips_instruction inst, u32 *opc,
				   struct kvm_vcpu *vcpu);
extern int kvm_mips_trans_mfc0(union mips_instruction inst, u32 *opc,
			       struct kvm_vcpu *vcpu);
extern int kvm_mips_trans_mtc0(union mips_instruction inst, u32 *opc,
			       struct kvm_vcpu *vcpu);

/* Misc */
extern void kvm_mips_dump_stats(struct kvm_vcpu *vcpu);
extern unsigned long kvm_mips_get_ramsize(struct kvm *kvm);

static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_free_memslot(struct kvm *kvm,
		struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {}
static inline void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}

#endif /* __MIPS_KVM_HOST_H__ */