/*
 * Copyright (C) 2012-2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/compiler.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/kvm_host.h>

#include <asm/kvm_hyp.h>

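/*
 * Field extractors for ICH_VTR_EL2: the low bits give the index of the
 * last implemented List Register, and bits [28:26] (PREbits) give the
 * number of implemented preemption bits, minus one.
 */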
#define vtr_to_max_lr_idx(v)	((v) & 0xf)
#define vtr_to_nr_pre_bits(v)	((((u32)(v) >> 26) & 7) + 1)

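/*
 * The List Registers are individual system registers (ICH_LR<n>_EL2) and
 * cannot be addressed with a runtime index, hence the switch-based
 * accessors below.
 */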
static u64 __hyp_text __gic_v3_get_lr(unsigned int lr)
{
        switch (lr & 0xf) {
        case 0:
                return read_gicreg(ICH_LR0_EL2);
        case 1:
                return read_gicreg(ICH_LR1_EL2);
        case 2:
                return read_gicreg(ICH_LR2_EL2);
        case 3:
                return read_gicreg(ICH_LR3_EL2);
        case 4:
                return read_gicreg(ICH_LR4_EL2);
        case 5:
                return read_gicreg(ICH_LR5_EL2);
        case 6:
                return read_gicreg(ICH_LR6_EL2);
        case 7:
                return read_gicreg(ICH_LR7_EL2);
        case 8:
                return read_gicreg(ICH_LR8_EL2);
        case 9:
                return read_gicreg(ICH_LR9_EL2);
        case 10:
                return read_gicreg(ICH_LR10_EL2);
        case 11:
                return read_gicreg(ICH_LR11_EL2);
        case 12:
                return read_gicreg(ICH_LR12_EL2);
        case 13:
                return read_gicreg(ICH_LR13_EL2);
        case 14:
                return read_gicreg(ICH_LR14_EL2);
        case 15:
                return read_gicreg(ICH_LR15_EL2);
        }

        unreachable();
}

static void __hyp_text __gic_v3_set_lr(u64 val, int lr)
{
        switch (lr & 0xf) {
        case 0:
                write_gicreg(val, ICH_LR0_EL2);
                break;
        case 1:
                write_gicreg(val, ICH_LR1_EL2);
                break;
        case 2:
                write_gicreg(val, ICH_LR2_EL2);
                break;
        case 3:
                write_gicreg(val, ICH_LR3_EL2);
                break;
        case 4:
                write_gicreg(val, ICH_LR4_EL2);
                break;
        case 5:
                write_gicreg(val, ICH_LR5_EL2);
                break;
        case 6:
                write_gicreg(val, ICH_LR6_EL2);
                break;
        case 7:
                write_gicreg(val, ICH_LR7_EL2);
                break;
        case 8:
                write_gicreg(val, ICH_LR8_EL2);
                break;
        case 9:
                write_gicreg(val, ICH_LR9_EL2);
                break;
        case 10:
                write_gicreg(val, ICH_LR10_EL2);
                break;
        case 11:
                write_gicreg(val, ICH_LR11_EL2);
                break;
        case 12:
                write_gicreg(val, ICH_LR12_EL2);
                break;
        case 13:
                write_gicreg(val, ICH_LR13_EL2);
                break;
        case 14:
                write_gicreg(val, ICH_LR14_EL2);
                break;
        case 15:
                write_gicreg(val, ICH_LR15_EL2);
                break;
        }
}

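/*
 * A maintenance interrupt can only be pending if underflow reporting is
 * enabled (ICH_HCR_EL2.UIE) or if a live, non-HW List Register asked for
 * an EOI maintenance interrupt; only then is it worth reading the
 * maintenance interrupt state registers.
 */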
static void __hyp_text save_maint_int_state(struct kvm_vcpu *vcpu, int nr_lr)
{
        struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
        int i;
        bool expect_mi;

        expect_mi = !!(cpu_if->vgic_hcr & ICH_HCR_UIE);

        for (i = 0; i < nr_lr; i++) {
                if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i)))
                        continue;

                expect_mi |= (!(cpu_if->vgic_lr[i] & ICH_LR_HW) &&
                              (cpu_if->vgic_lr[i] & ICH_LR_EOI));
        }

        if (expect_mi) {
                cpu_if->vgic_misr = read_gicreg(ICH_MISR_EL2);

                if (cpu_if->vgic_misr & ICH_MISR_EOI)
                        cpu_if->vgic_eisr = read_gicreg(ICH_EISR_EL2);
                else
                        cpu_if->vgic_eisr = 0;
        } else {
                cpu_if->vgic_misr = 0;
                cpu_if->vgic_eisr = 0;
        }
}

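/*
 * Save the guest's view of the vGIC on exit: VMCR, maintenance interrupt
 * state, any live List Registers and the active priority registers, then
 * re-enable system register access for the host.
 */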
void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
{
        struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
        u64 val;

        /*
         * Make sure stores to the GIC via the memory mapped interface
         * are now visible to the system register interface.
         */
        if (!cpu_if->vgic_sre)
                dsb(st);

        cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2);

        if (vcpu->arch.vgic_cpu.live_lrs) {
                int i;
                u32 max_lr_idx, nr_pre_bits;

                cpu_if->vgic_elrsr = read_gicreg(ICH_ELSR_EL2);

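                /*
                 * Clearing ICH_HCR_EL2 (including the En bit) disables the
                 * virtual CPU interface while the LRs are saved and cleared.
                 */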
                write_gicreg(0, ICH_HCR_EL2);
                val = read_gicreg(ICH_VTR_EL2);
                max_lr_idx = vtr_to_max_lr_idx(val);
                nr_pre_bits = vtr_to_nr_pre_bits(val);

                save_maint_int_state(vcpu, max_lr_idx + 1);

                for (i = 0; i <= max_lr_idx; i++) {
                        if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i)))
                                continue;

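                        /*
                         * If ELRSR flags the LR as empty, skip the register
                         * read and just drop the state bits from the cached
                         * copy.
                         */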
                        if (cpu_if->vgic_elrsr & (1 << i))
                                cpu_if->vgic_lr[i] &= ~ICH_LR_STATE;
                        else
                                cpu_if->vgic_lr[i] = __gic_v3_get_lr(i);

                        __gic_v3_set_lr(0, i);
                }

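                /*
                 * The fall-through in the switches below is intentional:
                 * 7 bits of preemption imply four active priority registers
                 * per group, 6 bits imply two, and anything smaller only
                 * uses AP<n>R0.
                 */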
                switch (nr_pre_bits) {
                case 7:
                        cpu_if->vgic_ap0r[3] = read_gicreg(ICH_AP0R3_EL2);
                        cpu_if->vgic_ap0r[2] = read_gicreg(ICH_AP0R2_EL2);
                case 6:
                        cpu_if->vgic_ap0r[1] = read_gicreg(ICH_AP0R1_EL2);
                default:
                        cpu_if->vgic_ap0r[0] = read_gicreg(ICH_AP0R0_EL2);
                }

                switch (nr_pre_bits) {
                case 7:
                        cpu_if->vgic_ap1r[3] = read_gicreg(ICH_AP1R3_EL2);
                        cpu_if->vgic_ap1r[2] = read_gicreg(ICH_AP1R2_EL2);
                case 6:
                        cpu_if->vgic_ap1r[1] = read_gicreg(ICH_AP1R1_EL2);
                default:
                        cpu_if->vgic_ap1r[0] = read_gicreg(ICH_AP1R0_EL2);
                }

                vcpu->arch.vgic_cpu.live_lrs = 0;
        } else {
                cpu_if->vgic_misr = 0;
                cpu_if->vgic_eisr = 0;
                cpu_if->vgic_elrsr = 0xffff;
                cpu_if->vgic_ap0r[0] = 0;
                cpu_if->vgic_ap0r[1] = 0;
                cpu_if->vgic_ap0r[2] = 0;
                cpu_if->vgic_ap0r[3] = 0;
                cpu_if->vgic_ap1r[0] = 0;
                cpu_if->vgic_ap1r[1] = 0;
                cpu_if->vgic_ap1r[2] = 0;
                cpu_if->vgic_ap1r[3] = 0;
        }

        val = read_gicreg(ICC_SRE_EL2);
        write_gicreg(val | ICC_SRE_EL2_ENABLE, ICC_SRE_EL2);

        if (!cpu_if->vgic_sre) {
                /* Make sure ENABLE is set at EL2 before setting SRE at EL1 */
                isb();
                write_gicreg(1, ICC_SRE_EL1);
        }
}

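/*
 * Restore the guest's vGIC state on entry: active priority registers, VMCR
 * and any List Registers carrying pending or active state.
 */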
void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
{
        struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
        u64 val;
        u32 max_lr_idx, nr_pre_bits;
        u16 live_lrs = 0;
        int i;

        /*
         * VFIQEn is RES1 if ICC_SRE_EL1.SRE is 1. This causes a
         * Group0 interrupt (as generated in GICv2 mode) to be
         * delivered as a FIQ to the guest, with potentially fatal
         * consequences. So we must make sure that ICC_SRE_EL1 has
         * been actually programmed with the value we want before
         * starting to mess with the rest of the GIC.
         */
        if (!cpu_if->vgic_sre) {
                write_gicreg(0, ICC_SRE_EL1);
                isb();
        }

        val = read_gicreg(ICH_VTR_EL2);
        max_lr_idx = vtr_to_max_lr_idx(val);
        nr_pre_bits = vtr_to_nr_pre_bits(val);

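        /* Only LRs carrying pending or active state need to be restored. */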
        for (i = 0; i <= max_lr_idx; i++) {
                if (cpu_if->vgic_lr[i] & ICH_LR_STATE)
                        live_lrs |= (1 << i);
        }

        write_gicreg(cpu_if->vgic_vmcr, ICH_VMCR_EL2);

        if (live_lrs) {
                write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);

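                /* As on the save path, the fall-through below is intentional. */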
                switch (nr_pre_bits) {
                case 7:
                        write_gicreg(cpu_if->vgic_ap0r[3], ICH_AP0R3_EL2);
                        write_gicreg(cpu_if->vgic_ap0r[2], ICH_AP0R2_EL2);
                case 6:
                        write_gicreg(cpu_if->vgic_ap0r[1], ICH_AP0R1_EL2);
                default:
                        write_gicreg(cpu_if->vgic_ap0r[0], ICH_AP0R0_EL2);
                }

                switch (nr_pre_bits) {
                case 7:
                        write_gicreg(cpu_if->vgic_ap1r[3], ICH_AP1R3_EL2);
                        write_gicreg(cpu_if->vgic_ap1r[2], ICH_AP1R2_EL2);
                case 6:
                        write_gicreg(cpu_if->vgic_ap1r[1], ICH_AP1R1_EL2);
                default:
                        write_gicreg(cpu_if->vgic_ap1r[0], ICH_AP1R0_EL2);
                }

                for (i = 0; i <= max_lr_idx; i++) {
                        if (!(live_lrs & (1 << i)))
                                continue;

                        __gic_v3_set_lr(cpu_if->vgic_lr[i], i);
                }
        }

        /*
         * Ensures that the above will have reached the
         * (re)distributors. This ensures the guest will read the
         * correct values from the memory-mapped interface.
         */
        if (!cpu_if->vgic_sre) {
                isb();
                dsb(sy);
        }
        vcpu->arch.vgic_cpu.live_lrs = live_lrs;

        /*
         * Prevent the guest from touching the GIC system registers if
         * SRE isn't enabled for GICv3 emulation.
         */
        write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE,
                     ICC_SRE_EL2);
}

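/* Reset all implemented List Registers to a known (zero) state. */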
void __hyp_text __vgic_v3_init_lrs(void)
{
        int max_lr_idx = vtr_to_max_lr_idx(read_gicreg(ICH_VTR_EL2));
        int i;

        for (i = 0; i <= max_lr_idx; i++)
                __gic_v3_set_lr(0, i);
}

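/* Allow the host to retrieve ICH_VTR_EL2, which is only accessible at EL2. */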
u64 __hyp_text __vgic_v3_get_ich_vtr_el2(void)
{
        return read_gicreg(ICH_VTR_EL2);
}