virt/kvm/arm/hyp/vgic-v3-sr.c
/*
 * Copyright (C) 2012-2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/compiler.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/kvm_host.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>

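/*
 * ICH_VTR_EL2 layout: ListRegs (bits [4:0]) holds the index of the
 * last implemented List Register, and PREbits (bits [28:26]) encodes
 * the number of preemption priority bits minus one.
 */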
#define vtr_to_max_lr_idx(v)	((v) & 0xf)
#define vtr_to_nr_pre_bits(v)	((((u32)(v) >> 26) & 7) + 1)

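/*
 * The List Registers are individual system registers rather than an
 * indexable array, so selecting one at runtime takes a switch.
 */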
static u64 __hyp_text __gic_v3_get_lr(unsigned int lr)
{
	switch (lr & 0xf) {
	case 0:
		return read_gicreg(ICH_LR0_EL2);
	case 1:
		return read_gicreg(ICH_LR1_EL2);
	case 2:
		return read_gicreg(ICH_LR2_EL2);
	case 3:
		return read_gicreg(ICH_LR3_EL2);
	case 4:
		return read_gicreg(ICH_LR4_EL2);
	case 5:
		return read_gicreg(ICH_LR5_EL2);
	case 6:
		return read_gicreg(ICH_LR6_EL2);
	case 7:
		return read_gicreg(ICH_LR7_EL2);
	case 8:
		return read_gicreg(ICH_LR8_EL2);
	case 9:
		return read_gicreg(ICH_LR9_EL2);
	case 10:
		return read_gicreg(ICH_LR10_EL2);
	case 11:
		return read_gicreg(ICH_LR11_EL2);
	case 12:
		return read_gicreg(ICH_LR12_EL2);
	case 13:
		return read_gicreg(ICH_LR13_EL2);
	case 14:
		return read_gicreg(ICH_LR14_EL2);
	case 15:
		return read_gicreg(ICH_LR15_EL2);
	}

	unreachable();
}

static void __hyp_text __gic_v3_set_lr(u64 val, int lr)
{
	switch (lr & 0xf) {
	case 0:
		write_gicreg(val, ICH_LR0_EL2);
		break;
	case 1:
		write_gicreg(val, ICH_LR1_EL2);
		break;
	case 2:
		write_gicreg(val, ICH_LR2_EL2);
		break;
	case 3:
		write_gicreg(val, ICH_LR3_EL2);
		break;
	case 4:
		write_gicreg(val, ICH_LR4_EL2);
		break;
	case 5:
		write_gicreg(val, ICH_LR5_EL2);
		break;
	case 6:
		write_gicreg(val, ICH_LR6_EL2);
		break;
	case 7:
		write_gicreg(val, ICH_LR7_EL2);
		break;
	case 8:
		write_gicreg(val, ICH_LR8_EL2);
		break;
	case 9:
		write_gicreg(val, ICH_LR9_EL2);
		break;
	case 10:
		write_gicreg(val, ICH_LR10_EL2);
		break;
	case 11:
		write_gicreg(val, ICH_LR11_EL2);
		break;
	case 12:
		write_gicreg(val, ICH_LR12_EL2);
		break;
	case 13:
		write_gicreg(val, ICH_LR13_EL2);
		break;
	case 14:
		write_gicreg(val, ICH_LR14_EL2);
		break;
	case 15:
		write_gicreg(val, ICH_LR15_EL2);
		break;
	}
}

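/*
 * A maintenance interrupt can only be pending if underflow signalling
 * is armed (ICH_HCR_EL2.UIE) or some live, software-only LR (HW bit
 * clear) has requested an EOI interrupt; only then is it worth
 * reading the maintenance status registers.
 */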
static void __hyp_text save_maint_int_state(struct kvm_vcpu *vcpu, int nr_lr)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
	int i;
	bool expect_mi;

	expect_mi = !!(cpu_if->vgic_hcr & ICH_HCR_UIE);

	for (i = 0; i < nr_lr; i++) {
		if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i)))
			continue;

		expect_mi |= (!(cpu_if->vgic_lr[i] & ICH_LR_HW) &&
			      (cpu_if->vgic_lr[i] & ICH_LR_EOI));
	}

	if (expect_mi) {
		cpu_if->vgic_misr = read_gicreg(ICH_MISR_EL2);

		if (cpu_if->vgic_misr & ICH_MISR_EOI)
			cpu_if->vgic_eisr = read_gicreg(ICH_EISR_EL2);
		else
			cpu_if->vgic_eisr = 0;
	} else {
		cpu_if->vgic_misr = 0;
		cpu_if->vgic_eisr = 0;
	}
}

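/*
 * Active priority registers: the number implemented follows the
 * preemption bits advertised in ICH_VTR_EL2 - one AP[01]Rn register
 * for 5 bits, two for 6 bits, four for 7 bits.
 */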
static void __hyp_text __vgic_v3_write_ap0rn(u32 val, int n)
{
	switch (n) {
	case 0:
		write_gicreg(val, ICH_AP0R0_EL2);
		break;
	case 1:
		write_gicreg(val, ICH_AP0R1_EL2);
		break;
	case 2:
		write_gicreg(val, ICH_AP0R2_EL2);
		break;
	case 3:
		write_gicreg(val, ICH_AP0R3_EL2);
		break;
	}
}

static void __hyp_text __vgic_v3_write_ap1rn(u32 val, int n)
{
	switch (n) {
	case 0:
		write_gicreg(val, ICH_AP1R0_EL2);
		break;
	case 1:
		write_gicreg(val, ICH_AP1R1_EL2);
		break;
	case 2:
		write_gicreg(val, ICH_AP1R2_EL2);
		break;
	case 3:
		write_gicreg(val, ICH_AP1R3_EL2);
		break;
	}
}

static u32 __hyp_text __vgic_v3_read_ap0rn(int n)
{
	u32 val;

	switch (n) {
	case 0:
		val = read_gicreg(ICH_AP0R0_EL2);
		break;
	case 1:
		val = read_gicreg(ICH_AP0R1_EL2);
		break;
	case 2:
		val = read_gicreg(ICH_AP0R2_EL2);
		break;
	case 3:
		val = read_gicreg(ICH_AP0R3_EL2);
		break;
	default:
		unreachable();
	}

	return val;
}

static u32 __hyp_text __vgic_v3_read_ap1rn(int n)
{
	u32 val;

	switch (n) {
	case 0:
		val = read_gicreg(ICH_AP1R0_EL2);
		break;
	case 1:
		val = read_gicreg(ICH_AP1R1_EL2);
		break;
	case 2:
		val = read_gicreg(ICH_AP1R2_EL2);
		break;
	case 3:
		val = read_gicreg(ICH_AP1R3_EL2);
		break;
	default:
		unreachable();
	}

	return val;
}

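/*
 * Save the guest's view of the CPU interface: VMCR, the live List
 * Registers, active priorities and maintenance interrupt state, then
 * zero ICH_HCR_EL2 and the LRs and re-enable the host's system
 * register access (ICC_SRE_EL2.Enable, ICC_SRE_EL1.SRE).
 */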
void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
	u64 val;

	/*
	 * Make sure stores to the GIC via the memory-mapped interface
	 * are now visible to the system register interface.
	 */
	if (!cpu_if->vgic_sre)
		dsb(st);

	cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2);

	if (vcpu->arch.vgic_cpu.live_lrs) {
		int i;
		u32 max_lr_idx, nr_pre_bits;

		cpu_if->vgic_elrsr = read_gicreg(ICH_ELSR_EL2);

		write_gicreg(0, ICH_HCR_EL2);
		val = read_gicreg(ICH_VTR_EL2);
		max_lr_idx = vtr_to_max_lr_idx(val);
		nr_pre_bits = vtr_to_nr_pre_bits(val);

		save_maint_int_state(vcpu, max_lr_idx + 1);

		for (i = 0; i <= max_lr_idx; i++) {
			if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i)))
				continue;

			if (cpu_if->vgic_elrsr & (1 << i))
				cpu_if->vgic_lr[i] &= ~ICH_LR_STATE;
			else
				cpu_if->vgic_lr[i] = __gic_v3_get_lr(i);

			__gic_v3_set_lr(0, i);
		}

		switch (nr_pre_bits) {
		case 7:
			cpu_if->vgic_ap0r[3] = __vgic_v3_read_ap0rn(3);
			cpu_if->vgic_ap0r[2] = __vgic_v3_read_ap0rn(2);
		case 6:
			cpu_if->vgic_ap0r[1] = __vgic_v3_read_ap0rn(1);
		default:
			cpu_if->vgic_ap0r[0] = __vgic_v3_read_ap0rn(0);
		}

		switch (nr_pre_bits) {
		case 7:
			cpu_if->vgic_ap1r[3] = __vgic_v3_read_ap1rn(3);
			cpu_if->vgic_ap1r[2] = __vgic_v3_read_ap1rn(2);
		case 6:
			cpu_if->vgic_ap1r[1] = __vgic_v3_read_ap1rn(1);
		default:
			cpu_if->vgic_ap1r[0] = __vgic_v3_read_ap1rn(0);
		}

		vcpu->arch.vgic_cpu.live_lrs = 0;
	} else {
		cpu_if->vgic_misr = 0;
		cpu_if->vgic_eisr = 0;
		cpu_if->vgic_elrsr = 0xffff;
		cpu_if->vgic_ap0r[0] = 0;
		cpu_if->vgic_ap0r[1] = 0;
		cpu_if->vgic_ap0r[2] = 0;
		cpu_if->vgic_ap0r[3] = 0;
		cpu_if->vgic_ap1r[0] = 0;
		cpu_if->vgic_ap1r[1] = 0;
		cpu_if->vgic_ap1r[2] = 0;
		cpu_if->vgic_ap1r[3] = 0;
	}

	val = read_gicreg(ICC_SRE_EL2);
	write_gicreg(val | ICC_SRE_EL2_ENABLE, ICC_SRE_EL2);

	if (!cpu_if->vgic_sre) {
		/* Make sure ENABLE is set at EL2 before setting SRE at EL1 */
		isb();
		write_gicreg(1, ICC_SRE_EL1);
	}
}

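/*
 * Restore mirrors the save path: recompute which saved LRs carry
 * pending/active state, then reprogram VMCR, HCR, the active
 * priorities and the LRs before handing the interface to the guest.
 */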
void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
	u64 val;
	u32 max_lr_idx, nr_pre_bits;
	u16 live_lrs = 0;
	int i;

	/*
	 * VFIQEn is RES1 if ICC_SRE_EL1.SRE is 1. This causes a
	 * Group0 interrupt (as generated in GICv2 mode) to be
	 * delivered as a FIQ to the guest, with potentially fatal
	 * consequences. So we must make sure that ICC_SRE_EL1 has
	 * been actually programmed with the value we want before
	 * starting to mess with the rest of the GIC.
	 */
	if (!cpu_if->vgic_sre) {
		write_gicreg(0, ICC_SRE_EL1);
		isb();
	}

	val = read_gicreg(ICH_VTR_EL2);
	max_lr_idx = vtr_to_max_lr_idx(val);
	nr_pre_bits = vtr_to_nr_pre_bits(val);

	for (i = 0; i <= max_lr_idx; i++) {
		if (cpu_if->vgic_lr[i] & ICH_LR_STATE)
			live_lrs |= (1 << i);
	}

	write_gicreg(cpu_if->vgic_vmcr, ICH_VMCR_EL2);

	if (live_lrs) {
		write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);

		switch (nr_pre_bits) {
		case 7:
			__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[3], 3);
			__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[2], 2);
		case 6:
			__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[1], 1);
		default:
			__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[0], 0);
		}

		switch (nr_pre_bits) {
		case 7:
			__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[3], 3);
			__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[2], 2);
		case 6:
			__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[1], 1);
		default:
			__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[0], 0);
		}

		for (i = 0; i <= max_lr_idx; i++) {
			if (!(live_lrs & (1 << i)))
				continue;

			__gic_v3_set_lr(cpu_if->vgic_lr[i], i);
		}
	}

	/*
	 * Ensure that the above writes will have reached the
	 * (re)distributors. This ensures the guest will read the
	 * correct values from the memory-mapped interface.
	 */
	if (!cpu_if->vgic_sre) {
		isb();
		dsb(sy);
	}
	vcpu->arch.vgic_cpu.live_lrs = live_lrs;

	/*
	 * Prevent the guest from touching the GIC system registers if
	 * SRE isn't enabled for GICv3 emulation.
	 */
	write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE,
		     ICC_SRE_EL2);
}

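/* Clear every implemented LR at init so no stale state leaks into a guest. */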
void __hyp_text __vgic_v3_init_lrs(void)
{
	int max_lr_idx = vtr_to_max_lr_idx(read_gicreg(ICH_VTR_EL2));
	int i;

	for (i = 0; i <= max_lr_idx; i++)
		__gic_v3_set_lr(0, i);
}

u64 __hyp_text __vgic_v3_get_ich_vtr_el2(void)
{
	return read_gicreg(ICH_VTR_EL2);
}

u64 __hyp_text __vgic_v3_read_vmcr(void)
{
	return read_gicreg(ICH_VMCR_EL2);
}

void __hyp_text __vgic_v3_write_vmcr(u32 vmcr)
{
	write_gicreg(vmcr, ICH_VMCR_EL2);
}

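/*
 * Everything below emulates a small set of guest ICC_* accesses that
 * have been trapped to EL2, using the state held in ICH_VMCR_EL2.
 */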
#ifdef CONFIG_ARM64

static int __hyp_text __vgic_v3_bpr_min(void)
{
	/* See Pseudocode for VPriorityGroup */
	return 8 - vtr_to_nr_pre_bits(read_gicreg(ICH_VTR_EL2));
}

static unsigned int __hyp_text __vgic_v3_get_bpr0(u32 vmcr)
{
	return (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT;
}

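/*
 * With CBPR set, BPR1 is an alias of BPR0 plus one (saturated at 7)
 * and writes to it are ignored.
 */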
static unsigned int __hyp_text __vgic_v3_get_bpr1(u32 vmcr)
{
	unsigned int bpr;

	if (vmcr & ICH_VMCR_CBPR_MASK) {
		bpr = __vgic_v3_get_bpr0(vmcr);
		if (bpr < 7)
			bpr++;
	} else {
		bpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT;
	}

	return bpr;
}

static void __hyp_text __vgic_v3_read_igrpen1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	vcpu_set_reg(vcpu, rt, !!(vmcr & ICH_VMCR_ENG1_MASK));
}

static void __hyp_text __vgic_v3_write_igrpen1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 val = vcpu_get_reg(vcpu, rt);

	if (val & 1)
		vmcr |= ICH_VMCR_ENG1_MASK;
	else
		vmcr &= ~ICH_VMCR_ENG1_MASK;

	__vgic_v3_write_vmcr(vmcr);
}

static void __hyp_text __vgic_v3_read_bpr1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	vcpu_set_reg(vcpu, rt, __vgic_v3_get_bpr1(vmcr));
}

static void __hyp_text __vgic_v3_write_bpr1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 val = vcpu_get_reg(vcpu, rt);
	u8 bpr_min = __vgic_v3_bpr_min();

	if (vmcr & ICH_VMCR_CBPR_MASK)
		return;

	/* Enforce BPR limiting */
	if (val < bpr_min)
		val = bpr_min;

	val <<= ICH_VMCR_BPR1_SHIFT;
	val &= ICH_VMCR_BPR1_MASK;
	vmcr &= ~ICH_VMCR_BPR1_MASK;
	vmcr |= val;

	__vgic_v3_write_vmcr(vmcr);
}

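/*
 * Emulate a trapped guest access to one of the handled GIC CPU
 * interface registers: decode the access from the ESR (mapping
 * AArch32 CP15 accesses onto the equivalent sysreg encoding),
 * dispatch to the matching handler, and return 1 if the access was
 * emulated or 0 if it must be handled elsewhere.
 */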
int __hyp_text __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu)
{
	int rt;
	u32 esr;
	u32 vmcr;
	void (*fn)(struct kvm_vcpu *, u32, int);
	bool is_read;
	u32 sysreg;

	esr = kvm_vcpu_get_hsr(vcpu);
	if (vcpu_mode_is_32bit(vcpu)) {
		if (!kvm_condition_valid(vcpu))
			return 1;

		sysreg = esr_cp15_to_sysreg(esr);
	} else {
		sysreg = esr_sys64_to_sysreg(esr);
	}

	is_read = (esr & ESR_ELx_SYS64_ISS_DIR_MASK) == ESR_ELx_SYS64_ISS_DIR_READ;

	switch (sysreg) {
	case ICC_GRPEN1_EL1:
		if (is_read)
			fn = __vgic_v3_read_igrpen1;
		else
			fn = __vgic_v3_write_igrpen1;
		break;
	case ICC_BPR1_EL1:
		if (is_read)
			fn = __vgic_v3_read_bpr1;
		else
			fn = __vgic_v3_write_bpr1;
		break;
	default:
		return 0;
	}

	vmcr = __vgic_v3_read_vmcr();
	rt = kvm_vcpu_sys_get_rt(vcpu);
	fn(vcpu, vmcr, rt);

	return 1;
}

#endif