]>
Commit | Line | Data |
---|---|---|
749cf76c CD |
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */
18 | ||
19 | #ifndef __ARM_KVM_EMULATE_H__ | |
20 | #define __ARM_KVM_EMULATE_H__ | |
21 | ||
22 | #include <linux/kvm_host.h> | |
23 | #include <asm/kvm_asm.h> | |
45e96ea6 | 24 | #include <asm/kvm_mmio.h> |
7393b599 | 25 | #include <asm/kvm_arm.h> |
749cf76c | 26 | |
/* GP-register and banked-SPSR accessors; definitions live outside this header. */
unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu);

/* Condition-code check for a trapped conditional instruction. */
bool kvm_condition_valid(struct kvm_vcpu *vcpu);
/* Skip the trapped instruction; is_wide_instr selects 16- vs 32-bit width. */
void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr);

/* Exception injection into the guest (undef, data abort, prefetch abort). */
void kvm_inject_undefined(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
35 | ||
/* Reset the vcpu's HCR to the default guest configuration (HCR_GUEST_MASK). */
static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr = HCR_GUEST_MASK;
}
40 | ||
aa024c2f MZ |
41 | static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu) |
42 | { | |
43 | return 1; | |
44 | } | |
45 | ||
/* Pointer to the guest's saved program counter (usr_regs.ARM_pc). */
static inline unsigned long *vcpu_pc(struct kvm_vcpu *vcpu)
{
	return &vcpu->arch.regs.usr_regs.ARM_pc;
}
50 | ||
/* Pointer to the guest's saved CPSR (usr_regs.ARM_cpsr). */
static inline unsigned long *vcpu_cpsr(struct kvm_vcpu *vcpu)
{
	return &vcpu->arch.regs.usr_regs.ARM_cpsr;
}
55 | ||
aa024c2f MZ |
56 | static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu) |
57 | { | |
58 | *vcpu_cpsr(vcpu) |= PSR_T_BIT; | |
59 | } | |
60 | ||
749cf76c CD |
61 | static inline bool mode_has_spsr(struct kvm_vcpu *vcpu) |
62 | { | |
63 | unsigned long cpsr_mode = vcpu->arch.regs.usr_regs.ARM_cpsr & MODE_MASK; | |
64 | return (cpsr_mode > USR_MODE && cpsr_mode < SYSTEM_MODE); | |
65 | } | |
66 | ||
67 | static inline bool vcpu_mode_priv(struct kvm_vcpu *vcpu) | |
68 | { | |
69 | unsigned long cpsr_mode = vcpu->arch.regs.usr_regs.ARM_cpsr & MODE_MASK; | |
70 | return cpsr_mode > USR_MODE;; | |
71 | } | |
72 | ||
/* Raw HSR (Hyp Syndrome Register) value saved in the vcpu fault state. */
static inline u32 kvm_vcpu_get_hsr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.hsr;
}
77 | ||
/* Saved HxFAR (fault address register) from the vcpu fault state. */
static inline unsigned long kvm_vcpu_get_hfar(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.hxfar;
}
82 | ||
/*
 * Fault IPA: the saved HPFAR is masked and shifted left by 8 to turn the
 * register's packed encoding into a physical address (see the ARM ARM
 * for the exact HPFAR layout).
 */
static inline phys_addr_t kvm_vcpu_get_fault_ipa(struct kvm_vcpu *vcpu)
{
	return ((phys_addr_t)vcpu->arch.fault.hpfar & HPFAR_MASK) << 8;
}
87 | ||
/* PC value saved in the vcpu fault state (hyp_pc). */
static inline unsigned long kvm_vcpu_get_hyp_pc(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.hyp_pc;
}
92 | ||
4a1df28a MZ |
93 | static inline bool kvm_vcpu_dabt_isvalid(struct kvm_vcpu *vcpu) |
94 | { | |
95 | return kvm_vcpu_get_hsr(vcpu) & HSR_ISV; | |
96 | } | |
97 | ||
023cc964 MZ |
98 | static inline bool kvm_vcpu_dabt_iswrite(struct kvm_vcpu *vcpu) |
99 | { | |
100 | return kvm_vcpu_get_hsr(vcpu) & HSR_WNR; | |
101 | } | |
102 | ||
7c511b88 MZ |
103 | static inline bool kvm_vcpu_dabt_issext(struct kvm_vcpu *vcpu) |
104 | { | |
105 | return kvm_vcpu_get_hsr(vcpu) & HSR_SSE; | |
106 | } | |
107 | ||
d0adf747 MZ |
108 | static inline int kvm_vcpu_dabt_get_rd(struct kvm_vcpu *vcpu) |
109 | { | |
110 | return (kvm_vcpu_get_hsr(vcpu) & HSR_SRT_MASK) >> HSR_SRT_SHIFT; | |
111 | } | |
112 | ||
78abfcde MZ |
113 | static inline bool kvm_vcpu_dabt_isextabt(struct kvm_vcpu *vcpu) |
114 | { | |
115 | return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_EA; | |
116 | } | |
117 | ||
b37670b0 MZ |
118 | static inline bool kvm_vcpu_dabt_iss1tw(struct kvm_vcpu *vcpu) |
119 | { | |
120 | return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_S1PTW; | |
121 | } | |
122 | ||
a7123377 MZ |
123 | /* Get Access Size from a data abort */ |
124 | static inline int kvm_vcpu_dabt_get_as(struct kvm_vcpu *vcpu) | |
125 | { | |
126 | switch ((kvm_vcpu_get_hsr(vcpu) >> 22) & 0x3) { | |
127 | case 0: | |
128 | return 1; | |
129 | case 1: | |
130 | return 2; | |
131 | case 2: | |
132 | return 4; | |
133 | default: | |
134 | kvm_err("Hardware is weird: SAS 0b11 is reserved\n"); | |
135 | return -EFAULT; | |
136 | } | |
137 | } | |
138 | ||
23b415d6 MZ |
139 | /* This one is not specific to Data Abort */ |
140 | static inline bool kvm_vcpu_trap_il_is32bit(struct kvm_vcpu *vcpu) | |
141 | { | |
142 | return kvm_vcpu_get_hsr(vcpu) & HSR_IL; | |
143 | } | |
144 | ||
4926d445 MZ |
145 | static inline u8 kvm_vcpu_trap_get_class(struct kvm_vcpu *vcpu) |
146 | { | |
147 | return kvm_vcpu_get_hsr(vcpu) >> HSR_EC_SHIFT; | |
148 | } | |
149 | ||
52d1dba9 MZ |
150 | static inline bool kvm_vcpu_trap_is_iabt(struct kvm_vcpu *vcpu) |
151 | { | |
152 | return kvm_vcpu_trap_get_class(vcpu) == HSR_EC_IABT; | |
153 | } | |
154 | ||
1cc287dd | 155 | static inline u8 kvm_vcpu_trap_get_fault(struct kvm_vcpu *vcpu) |
0496daa5 CD |
156 | { |
157 | return kvm_vcpu_get_hsr(vcpu) & HSR_FSC; | |
158 | } | |
159 | ||
160 | static inline u8 kvm_vcpu_trap_get_fault_type(struct kvm_vcpu *vcpu) | |
1cc287dd MZ |
161 | { |
162 | return kvm_vcpu_get_hsr(vcpu) & HSR_FSC_TYPE; | |
163 | } | |
164 | ||
c088f8f0 CD |
165 | static inline u32 kvm_vcpu_hvc_get_imm(struct kvm_vcpu *vcpu) |
166 | { | |
167 | return kvm_vcpu_get_hsr(vcpu) & HSR_HVC_IMM_MASK; | |
168 | } | |
169 | ||
/* Guest MPIDR, read from the vcpu's cp15 shadow register array. */
static inline unsigned long kvm_vcpu_get_mpidr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.cp15[c0_MPIDR];
}
174 | ||
ce94fe93 MZ |
175 | static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu) |
176 | { | |
177 | *vcpu_cpsr(vcpu) |= PSR_E_BIT; | |
178 | } | |
179 | ||
6d89d2d9 MZ |
180 | static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu) |
181 | { | |
182 | return !!(*vcpu_cpsr(vcpu) & PSR_E_BIT); | |
183 | } | |
184 | ||
185 | static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu, | |
186 | unsigned long data, | |
187 | unsigned int len) | |
188 | { | |
189 | if (kvm_vcpu_is_be(vcpu)) { | |
190 | switch (len) { | |
191 | case 1: | |
192 | return data & 0xff; | |
193 | case 2: | |
194 | return be16_to_cpu(data & 0xffff); | |
195 | default: | |
196 | return be32_to_cpu(data); | |
197 | } | |
27f194fd VK |
198 | } else { |
199 | switch (len) { | |
200 | case 1: | |
201 | return data & 0xff; | |
202 | case 2: | |
203 | return le16_to_cpu(data & 0xffff); | |
204 | default: | |
205 | return le32_to_cpu(data); | |
206 | } | |
6d89d2d9 | 207 | } |
6d89d2d9 MZ |
208 | } |
209 | ||
210 | static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu, | |
211 | unsigned long data, | |
212 | unsigned int len) | |
213 | { | |
214 | if (kvm_vcpu_is_be(vcpu)) { | |
215 | switch (len) { | |
216 | case 1: | |
217 | return data & 0xff; | |
218 | case 2: | |
219 | return cpu_to_be16(data & 0xffff); | |
220 | default: | |
221 | return cpu_to_be32(data); | |
222 | } | |
27f194fd VK |
223 | } else { |
224 | switch (len) { | |
225 | case 1: | |
226 | return data & 0xff; | |
227 | case 2: | |
228 | return cpu_to_le16(data & 0xffff); | |
229 | default: | |
230 | return cpu_to_le32(data); | |
231 | } | |
6d89d2d9 | 232 | } |
6d89d2d9 MZ |
233 | } |
234 | ||
749cf76c | 235 | #endif /* __ARM_KVM_EMULATE_H__ */ |