]> git.proxmox.com Git - mirror_ubuntu-zesty-kernel.git/blob - virt/kvm/arm/hyp/vgic-v3-sr.c
cfc9f79db2c6b457e37e6e290916c5f7e4df1c7a
[mirror_ubuntu-zesty-kernel.git] / virt / kvm / arm / hyp / vgic-v3-sr.c
1 /*
2 * Copyright (C) 2012-2015 - ARM Ltd
3 * Author: Marc Zyngier <marc.zyngier@arm.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18 #include <linux/compiler.h>
19 #include <linux/irqchip/arm-gic-v3.h>
20 #include <linux/kvm_host.h>
21
22 #include <asm/kvm_hyp.h>
23
/* Bits [3:0] of ICH_VTR_EL2: index of the highest implemented List Register. */
#define vtr_to_max_lr_idx(v)		((v) & 0xf)
/* Bits [28:26] of ICH_VTR_EL2 (PREbits field) encode preemption bits minus one. */
#define vtr_to_nr_pre_bits(v)		((((u32)(v) >> 26) & 7) + 1)
26
/*
 * Read List Register @lr (ICH_LR<n>_EL2).
 *
 * System register names must be compile-time constants, so a computed
 * access is impossible: each of the 16 possible LRs needs its own
 * read. The index is masked to 4 bits, matching the architectural
 * maximum of 16 List Registers.
 */
static u64 __hyp_text __gic_v3_get_lr(unsigned int lr)
{
	switch (lr & 0xf) {
	case 0:
		return read_gicreg(ICH_LR0_EL2);
	case 1:
		return read_gicreg(ICH_LR1_EL2);
	case 2:
		return read_gicreg(ICH_LR2_EL2);
	case 3:
		return read_gicreg(ICH_LR3_EL2);
	case 4:
		return read_gicreg(ICH_LR4_EL2);
	case 5:
		return read_gicreg(ICH_LR5_EL2);
	case 6:
		return read_gicreg(ICH_LR6_EL2);
	case 7:
		return read_gicreg(ICH_LR7_EL2);
	case 8:
		return read_gicreg(ICH_LR8_EL2);
	case 9:
		return read_gicreg(ICH_LR9_EL2);
	case 10:
		return read_gicreg(ICH_LR10_EL2);
	case 11:
		return read_gicreg(ICH_LR11_EL2);
	case 12:
		return read_gicreg(ICH_LR12_EL2);
	case 13:
		return read_gicreg(ICH_LR13_EL2);
	case 14:
		return read_gicreg(ICH_LR14_EL2);
	case 15:
		return read_gicreg(ICH_LR15_EL2);
	}

	/* The 4-bit mask guarantees one of the cases above matched. */
	unreachable();
}
66
/*
 * Write @val to List Register @lr (ICH_LR<n>_EL2).
 *
 * Mirror image of __gic_v3_get_lr(): system register names are
 * compile-time constants, hence the switch over all 16 possible LRs.
 * Indices outside the masked 0-15 range cannot occur.
 */
static void __hyp_text __gic_v3_set_lr(u64 val, int lr)
{
	switch (lr & 0xf) {
	case 0:
		write_gicreg(val, ICH_LR0_EL2);
		break;
	case 1:
		write_gicreg(val, ICH_LR1_EL2);
		break;
	case 2:
		write_gicreg(val, ICH_LR2_EL2);
		break;
	case 3:
		write_gicreg(val, ICH_LR3_EL2);
		break;
	case 4:
		write_gicreg(val, ICH_LR4_EL2);
		break;
	case 5:
		write_gicreg(val, ICH_LR5_EL2);
		break;
	case 6:
		write_gicreg(val, ICH_LR6_EL2);
		break;
	case 7:
		write_gicreg(val, ICH_LR7_EL2);
		break;
	case 8:
		write_gicreg(val, ICH_LR8_EL2);
		break;
	case 9:
		write_gicreg(val, ICH_LR9_EL2);
		break;
	case 10:
		write_gicreg(val, ICH_LR10_EL2);
		break;
	case 11:
		write_gicreg(val, ICH_LR11_EL2);
		break;
	case 12:
		write_gicreg(val, ICH_LR12_EL2);
		break;
	case 13:
		write_gicreg(val, ICH_LR13_EL2);
		break;
	case 14:
		write_gicreg(val, ICH_LR14_EL2);
		break;
	case 15:
		write_gicreg(val, ICH_LR15_EL2);
		break;
	}
}
120
121 static void __hyp_text save_maint_int_state(struct kvm_vcpu *vcpu, int nr_lr)
122 {
123 struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
124 int i;
125 bool expect_mi;
126
127 expect_mi = !!(cpu_if->vgic_hcr & ICH_HCR_UIE);
128
129 for (i = 0; i < nr_lr; i++) {
130 if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i)))
131 continue;
132
133 expect_mi |= (!(cpu_if->vgic_lr[i] & ICH_LR_HW) &&
134 (cpu_if->vgic_lr[i] & ICH_LR_EOI));
135 }
136
137 if (expect_mi) {
138 cpu_if->vgic_misr = read_gicreg(ICH_MISR_EL2);
139
140 if (cpu_if->vgic_misr & ICH_MISR_EOI)
141 cpu_if->vgic_eisr = read_gicreg(ICH_EISR_EL2);
142 else
143 cpu_if->vgic_eisr = 0;
144 } else {
145 cpu_if->vgic_misr = 0;
146 cpu_if->vgic_eisr = 0;
147 }
148 }
149
/*
 * Write @val to active priority register ICH_AP0R<n>_EL2 (Group 0).
 * Register names are compile-time constants, hence the switch on @n.
 * Values of @n outside 0-3 are silently ignored.
 */
static void __hyp_text __vgic_v3_write_ap0rn(u32 val, int n)
{
	switch (n) {
	case 0:
		write_gicreg(val, ICH_AP0R0_EL2);
		break;
	case 1:
		write_gicreg(val, ICH_AP0R1_EL2);
		break;
	case 2:
		write_gicreg(val, ICH_AP0R2_EL2);
		break;
	case 3:
		write_gicreg(val, ICH_AP0R3_EL2);
		break;
	}
}
167
/*
 * Write @val to active priority register ICH_AP1R<n>_EL2 (Group 1).
 * Same structure as __vgic_v3_write_ap0rn(); out-of-range @n is a no-op.
 */
static void __hyp_text __vgic_v3_write_ap1rn(u32 val, int n)
{
	switch (n) {
	case 0:
		write_gicreg(val, ICH_AP1R0_EL2);
		break;
	case 1:
		write_gicreg(val, ICH_AP1R1_EL2);
		break;
	case 2:
		write_gicreg(val, ICH_AP1R2_EL2);
		break;
	case 3:
		write_gicreg(val, ICH_AP1R3_EL2);
		break;
	}
}
185
/*
 * Read active priority register ICH_AP0R<n>_EL2 (Group 0).
 * Callers only pass n in 0-3; anything else is declared unreachable.
 */
static u32 __hyp_text __vgic_v3_read_ap0rn(int n)
{
	u32 val;

	switch (n) {
	case 0:
		val = read_gicreg(ICH_AP0R0_EL2);
		break;
	case 1:
		val = read_gicreg(ICH_AP0R1_EL2);
		break;
	case 2:
		val = read_gicreg(ICH_AP0R2_EL2);
		break;
	case 3:
		val = read_gicreg(ICH_AP0R3_EL2);
		break;
	default:
		unreachable();
	}

	return val;
}
209
/*
 * Read active priority register ICH_AP1R<n>_EL2 (Group 1).
 * Callers only pass n in 0-3; anything else is declared unreachable.
 */
static u32 __hyp_text __vgic_v3_read_ap1rn(int n)
{
	u32 val;

	switch (n) {
	case 0:
		val = read_gicreg(ICH_AP1R0_EL2);
		break;
	case 1:
		val = read_gicreg(ICH_AP1R1_EL2);
		break;
	case 2:
		val = read_gicreg(ICH_AP1R2_EL2);
		break;
	case 3:
		val = read_gicreg(ICH_AP1R3_EL2);
		break;
	default:
		unreachable();
	}

	return val;
}
233
/*
 * Save the vcpu's GICv3 virtual CPU interface state (VMCR, live LRs,
 * active priorities, maintenance interrupt status) into its shadow
 * copy, then re-enable the host's system register access to the GIC.
 *
 * NOTE(review): register ordering here is deliberate (barriers, HCR
 * disabled before LRs are torn down, SRE restored last) — do not
 * reorder.
 */
void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
	u64 val;

	/*
	 * Make sure stores to the GIC via the memory mapped interface
	 * are now visible to the system register interface.
	 */
	if (!cpu_if->vgic_sre)
		dsb(st);

	cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2);

	if (vcpu->arch.vgic_cpu.live_lrs) {
		int i;
		u32 max_lr_idx, nr_pre_bits;

		/* Empty-LR status: a set bit means that LR holds no state. */
		cpu_if->vgic_elrsr = read_gicreg(ICH_ELSR_EL2);

		/* Disable the virtual interface before touching the LRs. */
		write_gicreg(0, ICH_HCR_EL2);
		val = read_gicreg(ICH_VTR_EL2);
		max_lr_idx = vtr_to_max_lr_idx(val);
		nr_pre_bits = vtr_to_nr_pre_bits(val);

		save_maint_int_state(vcpu, max_lr_idx + 1);

		for (i = 0; i <= max_lr_idx; i++) {
			if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i)))
				continue;

			/*
			 * An empty LR needs no read-back; just drop the
			 * stale pending/active bits from the shadow copy.
			 */
			if (cpu_if->vgic_elrsr & (1 << i))
				cpu_if->vgic_lr[i] &= ~ICH_LR_STATE;
			else
				cpu_if->vgic_lr[i] = __gic_v3_get_lr(i);

			__gic_v3_set_lr(0, i);
		}

		/*
		 * Only the AP0R/AP1R registers implied by the number of
		 * preemption bits are implemented; higher cases fall
		 * through to save the lower registers too.
		 */
		switch (nr_pre_bits) {
		case 7:
			cpu_if->vgic_ap0r[3] = __vgic_v3_read_ap0rn(3);
			cpu_if->vgic_ap0r[2] = __vgic_v3_read_ap0rn(2);
			/* Fall through */
		case 6:
			cpu_if->vgic_ap0r[1] = __vgic_v3_read_ap0rn(1);
			/* Fall through */
		default:
			cpu_if->vgic_ap0r[0] = __vgic_v3_read_ap0rn(0);
		}

		switch (nr_pre_bits) {
		case 7:
			cpu_if->vgic_ap1r[3] = __vgic_v3_read_ap1rn(3);
			cpu_if->vgic_ap1r[2] = __vgic_v3_read_ap1rn(2);
			/* Fall through */
		case 6:
			cpu_if->vgic_ap1r[1] = __vgic_v3_read_ap1rn(1);
			/* Fall through */
		default:
			cpu_if->vgic_ap1r[0] = __vgic_v3_read_ap1rn(0);
		}

		vcpu->arch.vgic_cpu.live_lrs = 0;
	} else {
		/* Nothing live: reset the shadow state to "all empty". */
		cpu_if->vgic_misr = 0;
		cpu_if->vgic_eisr = 0;
		cpu_if->vgic_elrsr = 0xffff;
		cpu_if->vgic_ap0r[0] = 0;
		cpu_if->vgic_ap0r[1] = 0;
		cpu_if->vgic_ap0r[2] = 0;
		cpu_if->vgic_ap0r[3] = 0;
		cpu_if->vgic_ap1r[0] = 0;
		cpu_if->vgic_ap1r[1] = 0;
		cpu_if->vgic_ap1r[2] = 0;
		cpu_if->vgic_ap1r[3] = 0;
	}

	/* Give the host back its system register view of the GIC. */
	val = read_gicreg(ICC_SRE_EL2);
	write_gicreg(val | ICC_SRE_EL2_ENABLE, ICC_SRE_EL2);

	if (!cpu_if->vgic_sre) {
		/* Make sure ENABLE is set at EL2 before setting SRE at EL1 */
		isb();
		write_gicreg(1, ICC_SRE_EL1);
	}
}
317
/*
 * Restore the vcpu's GICv3 virtual CPU interface state from its
 * shadow copy: program SRE first, then VMCR, HCR, active priorities
 * and any LRs that carry pending/active state, and finally revoke the
 * guest's access to the GIC system registers when emulating GICv2.
 *
 * NOTE(review): the SRE write and the trailing barriers are
 * order-critical — see the comments below before reordering anything.
 */
void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
	u64 val;
	u32 max_lr_idx, nr_pre_bits;
	u16 live_lrs = 0;
	int i;

	/*
	 * VFIQEn is RES1 if ICC_SRE_EL1.SRE is 1. This causes a
	 * Group0 interrupt (as generated in GICv2 mode) to be
	 * delivered as a FIQ to the guest, with potentially fatal
	 * consequences. So we must make sure that ICC_SRE_EL1 has
	 * been actually programmed with the value we want before
	 * starting to mess with the rest of the GIC.
	 */
	if (!cpu_if->vgic_sre) {
		write_gicreg(0, ICC_SRE_EL1);
		isb();
	}

	val = read_gicreg(ICH_VTR_EL2);
	max_lr_idx = vtr_to_max_lr_idx(val);
	nr_pre_bits = vtr_to_nr_pre_bits(val);

	/* Only LRs whose shadow copy has pending/active state need restoring. */
	for (i = 0; i <= max_lr_idx; i++) {
		if (cpu_if->vgic_lr[i] & ICH_LR_STATE)
			live_lrs |= (1 << i);
	}

	write_gicreg(cpu_if->vgic_vmcr, ICH_VMCR_EL2);

	if (live_lrs) {
		write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);

		/*
		 * Restore only the AP0R/AP1R registers implied by the
		 * number of preemption bits; higher cases fall through.
		 */
		switch (nr_pre_bits) {
		case 7:
			__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[3], 3);
			__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[2], 2);
			/* Fall through */
		case 6:
			__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[1], 1);
			/* Fall through */
		default:
			__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[0], 0);
		}

		switch (nr_pre_bits) {
		case 7:
			__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[3], 3);
			__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[2], 2);
			/* Fall through */
		case 6:
			__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[1], 1);
			/* Fall through */
		default:
			__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[0], 0);
		}

		for (i = 0; i <= max_lr_idx; i++) {
			if (!(live_lrs & (1 << i)))
				continue;

			__gic_v3_set_lr(cpu_if->vgic_lr[i], i);
		}
	}

	/*
	 * Ensures that the above will have reached the
	 * (re)distributors. This ensure the guest will read the
	 * correct values from the memory-mapped interface.
	 */
	if (!cpu_if->vgic_sre) {
		isb();
		dsb(sy);
	}
	vcpu->arch.vgic_cpu.live_lrs = live_lrs;

	/*
	 * Prevent the guest from touching the GIC system registers if
	 * SRE isn't enabled for GICv3 emulation.
	 */
	write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE,
		     ICC_SRE_EL2);
}
399
400 void __hyp_text __vgic_v3_init_lrs(void)
401 {
402 int max_lr_idx = vtr_to_max_lr_idx(read_gicreg(ICH_VTR_EL2));
403 int i;
404
405 for (i = 0; i <= max_lr_idx; i++)
406 __gic_v3_set_lr(0, i);
407 }
408
/*
 * Expose the raw ICH_VTR_EL2 value (number of LRs, priority and
 * preemption bits, etc.) to callers outside the hypervisor text.
 */
u64 __hyp_text __vgic_v3_get_ich_vtr_el2(void)
{
	return read_gicreg(ICH_VTR_EL2);
}