virt/kvm/arm/hyp/vgic-v3-sr.c (mirror_ubuntu-zesty-kernel, at commit "KVM: arm64: vgic-v3: Add ICV_CTLR_EL1 handler")
/*
 * Copyright (C) 2012-2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/compiler.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/kvm_host.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>

#define vtr_to_max_lr_idx(v)		((v) & 0xf)
#define vtr_to_nr_pre_bits(v)		((((u32)(v) >> 26) & 7) + 1)
#define vtr_to_nr_apr_regs(v)		(1 << (vtr_to_nr_pre_bits(v) - 5))

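/*
 * Worked example: for a hypothetical ICH_VTR_EL2 value with bits[28:26] = 4
 * and bits[4:0] = 3, the macros above give max_lr_idx = 3 (i.e. four list
 * registers), nr_pre_bits = 4 + 1 = 5 and nr_apr_regs = 1 << (5 - 5) = 1.
 */
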
static u64 __hyp_text __gic_v3_get_lr(unsigned int lr)
{
        switch (lr & 0xf) {
        case 0:
                return read_gicreg(ICH_LR0_EL2);
        case 1:
                return read_gicreg(ICH_LR1_EL2);
        case 2:
                return read_gicreg(ICH_LR2_EL2);
        case 3:
                return read_gicreg(ICH_LR3_EL2);
        case 4:
                return read_gicreg(ICH_LR4_EL2);
        case 5:
                return read_gicreg(ICH_LR5_EL2);
        case 6:
                return read_gicreg(ICH_LR6_EL2);
        case 7:
                return read_gicreg(ICH_LR7_EL2);
        case 8:
                return read_gicreg(ICH_LR8_EL2);
        case 9:
                return read_gicreg(ICH_LR9_EL2);
        case 10:
                return read_gicreg(ICH_LR10_EL2);
        case 11:
                return read_gicreg(ICH_LR11_EL2);
        case 12:
                return read_gicreg(ICH_LR12_EL2);
        case 13:
                return read_gicreg(ICH_LR13_EL2);
        case 14:
                return read_gicreg(ICH_LR14_EL2);
        case 15:
                return read_gicreg(ICH_LR15_EL2);
        }

        unreachable();
}

static void __hyp_text __gic_v3_set_lr(u64 val, int lr)
{
        switch (lr & 0xf) {
        case 0:
                write_gicreg(val, ICH_LR0_EL2);
                break;
        case 1:
                write_gicreg(val, ICH_LR1_EL2);
                break;
        case 2:
                write_gicreg(val, ICH_LR2_EL2);
                break;
        case 3:
                write_gicreg(val, ICH_LR3_EL2);
                break;
        case 4:
                write_gicreg(val, ICH_LR4_EL2);
                break;
        case 5:
                write_gicreg(val, ICH_LR5_EL2);
                break;
        case 6:
                write_gicreg(val, ICH_LR6_EL2);
                break;
        case 7:
                write_gicreg(val, ICH_LR7_EL2);
                break;
        case 8:
                write_gicreg(val, ICH_LR8_EL2);
                break;
        case 9:
                write_gicreg(val, ICH_LR9_EL2);
                break;
        case 10:
                write_gicreg(val, ICH_LR10_EL2);
                break;
        case 11:
                write_gicreg(val, ICH_LR11_EL2);
                break;
        case 12:
                write_gicreg(val, ICH_LR12_EL2);
                break;
        case 13:
                write_gicreg(val, ICH_LR13_EL2);
                break;
        case 14:
                write_gicreg(val, ICH_LR14_EL2);
                break;
        case 15:
                write_gicreg(val, ICH_LR15_EL2);
                break;
        }
}

static void __hyp_text save_maint_int_state(struct kvm_vcpu *vcpu, int nr_lr)
{
        struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
        int i;
        bool expect_mi;

        expect_mi = !!(cpu_if->vgic_hcr & ICH_HCR_UIE);

        for (i = 0; i < nr_lr; i++) {
                if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i)))
                        continue;

                expect_mi |= (!(cpu_if->vgic_lr[i] & ICH_LR_HW) &&
                              (cpu_if->vgic_lr[i] & ICH_LR_EOI));
        }

        if (expect_mi) {
                cpu_if->vgic_misr = read_gicreg(ICH_MISR_EL2);

                if (cpu_if->vgic_misr & ICH_MISR_EOI)
                        cpu_if->vgic_eisr = read_gicreg(ICH_EISR_EL2);
                else
                        cpu_if->vgic_eisr = 0;
        } else {
                cpu_if->vgic_misr = 0;
                cpu_if->vgic_eisr = 0;
        }
}

static void __hyp_text __vgic_v3_write_ap0rn(u32 val, int n)
{
        switch (n) {
        case 0:
                write_gicreg(val, ICH_AP0R0_EL2);
                break;
        case 1:
                write_gicreg(val, ICH_AP0R1_EL2);
                break;
        case 2:
                write_gicreg(val, ICH_AP0R2_EL2);
                break;
        case 3:
                write_gicreg(val, ICH_AP0R3_EL2);
                break;
        }
}

static void __hyp_text __vgic_v3_write_ap1rn(u32 val, int n)
{
        switch (n) {
        case 0:
                write_gicreg(val, ICH_AP1R0_EL2);
                break;
        case 1:
                write_gicreg(val, ICH_AP1R1_EL2);
                break;
        case 2:
                write_gicreg(val, ICH_AP1R2_EL2);
                break;
        case 3:
                write_gicreg(val, ICH_AP1R3_EL2);
                break;
        }
}

static u32 __hyp_text __vgic_v3_read_ap0rn(int n)
{
        u32 val;

        switch (n) {
        case 0:
                val = read_gicreg(ICH_AP0R0_EL2);
                break;
        case 1:
                val = read_gicreg(ICH_AP0R1_EL2);
                break;
        case 2:
                val = read_gicreg(ICH_AP0R2_EL2);
                break;
        case 3:
                val = read_gicreg(ICH_AP0R3_EL2);
                break;
        default:
                unreachable();
        }

        return val;
}

static u32 __hyp_text __vgic_v3_read_ap1rn(int n)
{
        u32 val;

        switch (n) {
        case 0:
                val = read_gicreg(ICH_AP1R0_EL2);
                break;
        case 1:
                val = read_gicreg(ICH_AP1R1_EL2);
                break;
        case 2:
                val = read_gicreg(ICH_AP1R2_EL2);
                break;
        case 3:
                val = read_gicreg(ICH_AP1R3_EL2);
                break;
        default:
                unreachable();
        }

        return val;
}

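/*
 * Overview of the save path below: on every guest exit the current
 * ICH_VMCR_EL2 is captured. If any list registers were live, the EL2
 * interface is disabled (ICH_HCR_EL2 cleared), the maintenance interrupt
 * state, list registers and active priority registers are copied into the
 * shadow vgic_v3_cpu_if and the hardware LRs are wiped. Finally the
 * ICC_SRE_EL2 Enable bit is set again and, for guests that ran with SRE
 * disabled, ICC_SRE_EL1 is set back to 1.
 */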
void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
{
        struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
        u64 val;

        /*
         * Make sure stores to the GIC via the memory mapped interface
         * are now visible to the system register interface.
         */
        if (!cpu_if->vgic_sre)
                dsb(st);

        cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2);

        if (vcpu->arch.vgic_cpu.live_lrs) {
                int i;
                u32 max_lr_idx, nr_pre_bits;

                cpu_if->vgic_elrsr = read_gicreg(ICH_ELSR_EL2);

                write_gicreg(0, ICH_HCR_EL2);
                val = read_gicreg(ICH_VTR_EL2);
                max_lr_idx = vtr_to_max_lr_idx(val);
                nr_pre_bits = vtr_to_nr_pre_bits(val);

                save_maint_int_state(vcpu, max_lr_idx + 1);

                for (i = 0; i <= max_lr_idx; i++) {
                        if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i)))
                                continue;

                        if (cpu_if->vgic_elrsr & (1 << i))
                                cpu_if->vgic_lr[i] &= ~ICH_LR_STATE;
                        else
                                cpu_if->vgic_lr[i] = __gic_v3_get_lr(i);

                        __gic_v3_set_lr(0, i);
                }

                switch (nr_pre_bits) {
                case 7:
                        cpu_if->vgic_ap0r[3] = __vgic_v3_read_ap0rn(3);
                        cpu_if->vgic_ap0r[2] = __vgic_v3_read_ap0rn(2);
                        /* Fall through */
                case 6:
                        cpu_if->vgic_ap0r[1] = __vgic_v3_read_ap0rn(1);
                        /* Fall through */
                default:
                        cpu_if->vgic_ap0r[0] = __vgic_v3_read_ap0rn(0);
                }

                switch (nr_pre_bits) {
                case 7:
                        cpu_if->vgic_ap1r[3] = __vgic_v3_read_ap1rn(3);
                        cpu_if->vgic_ap1r[2] = __vgic_v3_read_ap1rn(2);
                        /* Fall through */
                case 6:
                        cpu_if->vgic_ap1r[1] = __vgic_v3_read_ap1rn(1);
                        /* Fall through */
                default:
                        cpu_if->vgic_ap1r[0] = __vgic_v3_read_ap1rn(0);
                }

                vcpu->arch.vgic_cpu.live_lrs = 0;
        } else {
                if (static_branch_unlikely(&vgic_v3_cpuif_trap))
                        write_gicreg(0, ICH_HCR_EL2);

                cpu_if->vgic_misr = 0;
                cpu_if->vgic_eisr = 0;
                cpu_if->vgic_elrsr = 0xffff;
                cpu_if->vgic_ap0r[0] = 0;
                cpu_if->vgic_ap0r[1] = 0;
                cpu_if->vgic_ap0r[2] = 0;
                cpu_if->vgic_ap0r[3] = 0;
                cpu_if->vgic_ap1r[0] = 0;
                cpu_if->vgic_ap1r[1] = 0;
                cpu_if->vgic_ap1r[2] = 0;
                cpu_if->vgic_ap1r[3] = 0;
        }

        val = read_gicreg(ICC_SRE_EL2);
        write_gicreg(val | ICC_SRE_EL2_ENABLE, ICC_SRE_EL2);

        if (!cpu_if->vgic_sre) {
                /* Make sure ENABLE is set at EL2 before setting SRE at EL1 */
                isb();
                write_gicreg(1, ICC_SRE_EL1);
        }
}

void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
{
        struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
        u64 val;
        u32 max_lr_idx, nr_pre_bits;
        u16 live_lrs = 0;
        int i;

        /*
         * VFIQEn is RES1 if ICC_SRE_EL1.SRE is 1. This causes a
         * Group0 interrupt (as generated in GICv2 mode) to be
         * delivered as a FIQ to the guest, with potentially fatal
         * consequences. So we must make sure that ICC_SRE_EL1 has
         * been actually programmed with the value we want before
         * starting to mess with the rest of the GIC.
         */
        if (!cpu_if->vgic_sre) {
                write_gicreg(0, ICC_SRE_EL1);
                isb();
        }

        val = read_gicreg(ICH_VTR_EL2);
        max_lr_idx = vtr_to_max_lr_idx(val);
        nr_pre_bits = vtr_to_nr_pre_bits(val);

        for (i = 0; i <= max_lr_idx; i++) {
                if (cpu_if->vgic_lr[i] & ICH_LR_STATE)
                        live_lrs |= (1 << i);
        }

        write_gicreg(cpu_if->vgic_vmcr, ICH_VMCR_EL2);

        if (live_lrs) {
                write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);

                switch (nr_pre_bits) {
                case 7:
                        __vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[3], 3);
                        __vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[2], 2);
                        /* Fall through */
                case 6:
                        __vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[1], 1);
                        /* Fall through */
                default:
                        __vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[0], 0);
                }

                switch (nr_pre_bits) {
                case 7:
                        __vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[3], 3);
                        __vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[2], 2);
                        /* Fall through */
                case 6:
                        __vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[1], 1);
                        /* Fall through */
                default:
                        __vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[0], 0);
                }

                for (i = 0; i <= max_lr_idx; i++) {
                        if (!(live_lrs & (1 << i)))
                                continue;

                        __gic_v3_set_lr(cpu_if->vgic_lr[i], i);
                }
        } else {
                /*
                 * If we need to trap system registers, we must write
                 * ICH_HCR_EL2 anyway, even if no interrupts are being
                 * injected.
                 */
                if (static_branch_unlikely(&vgic_v3_cpuif_trap))
                        write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);
        }

        /*
         * Ensure that the above writes will have reached the
         * (re)distributors, so that the guest will read the correct
         * values from the memory-mapped interface.
         */
        if (!cpu_if->vgic_sre) {
                isb();
                dsb(sy);
        }
        vcpu->arch.vgic_cpu.live_lrs = live_lrs;

        /*
         * Prevent the guest from touching the GIC system registers if
         * SRE isn't enabled for GICv3 emulation.
         */
        write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE,
                     ICC_SRE_EL2);
}

void __hyp_text __vgic_v3_init_lrs(void)
{
        int max_lr_idx = vtr_to_max_lr_idx(read_gicreg(ICH_VTR_EL2));
        int i;

        for (i = 0; i <= max_lr_idx; i++)
                __gic_v3_set_lr(0, i);
}

u64 __hyp_text __vgic_v3_get_ich_vtr_el2(void)
{
        return read_gicreg(ICH_VTR_EL2);
}

u64 __hyp_text __vgic_v3_read_vmcr(void)
{
        return read_gicreg(ICH_VMCR_EL2);
}

void __hyp_text __vgic_v3_write_vmcr(u32 vmcr)
{
        write_gicreg(vmcr, ICH_VMCR_EL2);
}

#ifdef CONFIG_ARM64

static int __hyp_text __vgic_v3_bpr_min(void)
{
        /* See Pseudocode for VPriorityGroup */
        return 8 - vtr_to_nr_pre_bits(read_gicreg(ICH_VTR_EL2));
}
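
/*
 * Worked example: for an implementation with 5 preemption bits
 * (nr_pre_bits = 5), the minimum BPR computed above is 8 - 5 = 3,
 * i.e. only priority bits [7:3] take part in preemption.
 */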

static int __hyp_text __vgic_v3_get_group(struct kvm_vcpu *vcpu)
{
        u32 esr = kvm_vcpu_get_hsr(vcpu);
        u8 crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT;

        return crm != 8;
}
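
/*
 * Note: in the GICv3 system register encoding the group-0 accessors
 * (ICC_IAR0_EL1, ICC_EOIR0_EL1, ICC_HPPIR0_EL1, ICC_BPR0_EL1 and
 * ICC_AP0Rn_EL1) are encoded with CRm == 8, so the helper above
 * returns 0 for a group-0 access and 1 otherwise.
 */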

#define GICv3_IDLE_PRIORITY	0xff

static int __hyp_text __vgic_v3_highest_priority_lr(struct kvm_vcpu *vcpu,
                                                    u32 vmcr,
                                                    u64 *lr_val)
{
        unsigned int used_lrs = vcpu->arch.vgic_cpu.used_lrs;
        u8 priority = GICv3_IDLE_PRIORITY;
        int i, lr = -1;

        for (i = 0; i < used_lrs; i++) {
                u64 val = __gic_v3_get_lr(i);
                u8 lr_prio = (val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;

                /* Not pending in the state? */
                if ((val & ICH_LR_STATE) != ICH_LR_PENDING_BIT)
                        continue;

                /* Group-0 interrupt, but Group-0 disabled? */
                if (!(val & ICH_LR_GROUP) && !(vmcr & ICH_VMCR_ENG0_MASK))
                        continue;

                /* Group-1 interrupt, but Group-1 disabled? */
                if ((val & ICH_LR_GROUP) && !(vmcr & ICH_VMCR_ENG1_MASK))
                        continue;

                /* Not the highest priority? */
                if (lr_prio >= priority)
                        continue;

                /* This is a candidate */
                priority = lr_prio;
                *lr_val = val;
                lr = i;
        }

        if (lr == -1)
                *lr_val = ICC_IAR1_EL1_SPURIOUS;

        return lr;
}

static int __hyp_text __vgic_v3_find_active_lr(struct kvm_vcpu *vcpu,
                                               int intid, u64 *lr_val)
{
        unsigned int used_lrs = vcpu->arch.vgic_cpu.used_lrs;
        int i;

        for (i = 0; i < used_lrs; i++) {
                u64 val = __gic_v3_get_lr(i);

                if ((val & ICH_LR_VIRTUAL_ID_MASK) == intid &&
                    (val & ICH_LR_ACTIVE_BIT)) {
                        *lr_val = val;
                        return i;
                }
        }

        *lr_val = ICC_IAR1_EL1_SPURIOUS;
        return -1;
}

static int __hyp_text __vgic_v3_get_highest_active_priority(void)
{
        u8 nr_apr_regs = vtr_to_nr_apr_regs(read_gicreg(ICH_VTR_EL2));
        u32 hap = 0;
        int i;

        for (i = 0; i < nr_apr_regs; i++) {
                u32 val;

                /*
                 * The ICH_AP0Rn_EL2 and ICH_AP1Rn_EL2 registers
                 * contain the active priority levels for this VCPU
                 * for the maximum number of supported priority
                 * levels, and we return the full priority level only
                 * if the BPR is programmed to its minimum, otherwise
                 * we return a combination of the priority level and
                 * subpriority, as determined by the setting of the
                 * BPR, but without the full subpriority.
                 */
                val = __vgic_v3_read_ap0rn(i);
                val |= __vgic_v3_read_ap1rn(i);
                if (!val) {
                        hap += 32;
                        continue;
                }

                return (hap + __ffs(val)) << __vgic_v3_bpr_min();
        }

        return GICv3_IDLE_PRIORITY;
}
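
/*
 * Worked example: assume a single APR register pair and a minimum BPR
 * of 3. If ICH_AP1R0_EL2 has bit 2 set (and ICH_AP0R0_EL2 is zero),
 * the function above returns (0 + 2) << 3 = 0x10 as the highest active
 * priority. With no bits set anywhere it returns 0xff
 * (GICv3_IDLE_PRIORITY).
 */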

static unsigned int __hyp_text __vgic_v3_get_bpr0(u32 vmcr)
{
        return (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT;
}

static unsigned int __hyp_text __vgic_v3_get_bpr1(u32 vmcr)
{
        unsigned int bpr;

        if (vmcr & ICH_VMCR_CBPR_MASK) {
                bpr = __vgic_v3_get_bpr0(vmcr);
                if (bpr < 7)
                        bpr++;
        } else {
                bpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT;
        }

        return bpr;
}
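
/*
 * Example: with the virtual CBPR bit set and a virtual BPR0 of 2, the
 * helper above reports an effective BPR1 of 3 (BPR0 + 1, saturated
 * at 7); otherwise the BPR1 field of ICH_VMCR_EL2 is used as-is.
 */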

/*
 * Convert a priority to a preemption level, taking the relevant BPR
 * into account by zeroing the sub-priority bits.
 */
static u8 __hyp_text __vgic_v3_pri_to_pre(u8 pri, u32 vmcr, int grp)
{
        unsigned int bpr;

        if (!grp)
                bpr = __vgic_v3_get_bpr0(vmcr) + 1;
        else
                bpr = __vgic_v3_get_bpr1(vmcr);

        return pri & (GENMASK(7, 0) << bpr);
}
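
/*
 * Worked example: for a group-1 interrupt with priority 0x9d and a
 * virtual BPR1 of 3, the mask is GENMASK(7, 0) << 3, so the resulting
 * preemption level is 0x9d & 0xf8 = 0x98 (the three sub-priority bits
 * are dropped).
 */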

/*
 * The priority value is independent of any of the BPR values, so we
 * normalize it using the minimal BPR value. This guarantees that no
 * matter what the guest does with its BPR, we can always set/get the
 * same value of a priority.
 */
static void __hyp_text __vgic_v3_set_active_priority(u8 pri, u32 vmcr, int grp)
{
        u8 pre, ap;
        u32 val;
        int apr;

        pre = __vgic_v3_pri_to_pre(pri, vmcr, grp);
        ap = pre >> __vgic_v3_bpr_min();
        apr = ap / 32;

        if (!grp) {
                val = __vgic_v3_read_ap0rn(apr);
                __vgic_v3_write_ap0rn(val | BIT(ap % 32), apr);
        } else {
                val = __vgic_v3_read_ap1rn(apr);
                __vgic_v3_write_ap1rn(val | BIT(ap % 32), apr);
        }
}
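
/*
 * Continuing the example above: a preemption level of 0x98 with a
 * minimum BPR of 3 gives ap = 0x98 >> 3 = 19, so for a group-1
 * interrupt bit 19 of ICH_AP1R0_EL2 (apr = 19 / 32 = 0) is set.
 */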

static int __hyp_text __vgic_v3_clear_highest_active_priority(void)
{
        u8 nr_apr_regs = vtr_to_nr_apr_regs(read_gicreg(ICH_VTR_EL2));
        u32 hap = 0;
        int i;

        for (i = 0; i < nr_apr_regs; i++) {
                u32 ap0, ap1;
                int c0, c1;

                ap0 = __vgic_v3_read_ap0rn(i);
                ap1 = __vgic_v3_read_ap1rn(i);
                if (!ap0 && !ap1) {
                        hap += 32;
                        continue;
                }

                c0 = ap0 ? __ffs(ap0) : 32;
                c1 = ap1 ? __ffs(ap1) : 32;

                /* Always clear the LSB, which is the highest priority */
                if (c0 < c1) {
                        ap0 &= ~BIT(c0);
                        __vgic_v3_write_ap0rn(ap0, i);
                        hap += c0;
                } else {
                        ap1 &= ~BIT(c1);
                        __vgic_v3_write_ap1rn(ap1, i);
                        hap += c1;
                }

                /* Rescale to 8 bits of priority */
                return hap << __vgic_v3_bpr_min();
        }

        return GICv3_IDLE_PRIORITY;
}

static void __hyp_text __vgic_v3_read_iar(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
        u64 lr_val;
        u8 lr_prio, pmr;
        int lr, grp;

        grp = __vgic_v3_get_group(vcpu);

        lr = __vgic_v3_highest_priority_lr(vcpu, vmcr, &lr_val);
        if (lr < 0)
                goto spurious;

        if (grp != !!(lr_val & ICH_LR_GROUP))
                goto spurious;

        pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT;
        lr_prio = (lr_val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;
        if (pmr <= lr_prio)
                goto spurious;

        if (__vgic_v3_get_highest_active_priority() <= __vgic_v3_pri_to_pre(lr_prio, vmcr, grp))
                goto spurious;

        lr_val &= ~ICH_LR_STATE;
        /* No active state for LPIs */
        if ((lr_val & ICH_LR_VIRTUAL_ID_MASK) <= VGIC_MAX_SPI)
                lr_val |= ICH_LR_ACTIVE_BIT;
        __gic_v3_set_lr(lr_val, lr);
        __vgic_v3_set_active_priority(lr_prio, vmcr, grp);
        vcpu_set_reg(vcpu, rt, lr_val & ICH_LR_VIRTUAL_ID_MASK);
        return;

spurious:
        vcpu_set_reg(vcpu, rt, ICC_IAR1_EL1_SPURIOUS);
}

static void __hyp_text __vgic_v3_clear_active_lr(int lr, u64 lr_val)
{
        lr_val &= ~ICH_LR_ACTIVE_BIT;
        if (lr_val & ICH_LR_HW) {
                u32 pid;

                pid = (lr_val & ICH_LR_PHYS_ID_MASK) >> ICH_LR_PHYS_ID_SHIFT;
                gic_write_dir(pid);
        }

        __gic_v3_set_lr(lr_val, lr);
}

static void __hyp_text __vgic_v3_bump_eoicount(void)
{
        u32 hcr;

        hcr = read_gicreg(ICH_HCR_EL2);
        hcr += 1 << ICH_HCR_EOIcount_SHIFT;
        write_gicreg(hcr, ICH_HCR_EL2);
}

static void __hyp_text __vgic_v3_write_dir(struct kvm_vcpu *vcpu,
                                           u32 vmcr, int rt)
{
        u32 vid = vcpu_get_reg(vcpu, rt);
        u64 lr_val;
        int lr;

        /* EOImode == 0, nothing to be done here */
        if (!(vmcr & ICH_VMCR_EOIM_MASK))
                return;

        /* No deactivate to be performed on an LPI */
        if (vid >= VGIC_MIN_LPI)
                return;

        lr = __vgic_v3_find_active_lr(vcpu, vid, &lr_val);
        if (lr == -1) {
                __vgic_v3_bump_eoicount();
                return;
        }

        __vgic_v3_clear_active_lr(lr, lr_val);
}

static void __hyp_text __vgic_v3_write_eoir(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
        u32 vid = vcpu_get_reg(vcpu, rt);
        u64 lr_val;
        u8 lr_prio, act_prio;
        int lr, grp;

        grp = __vgic_v3_get_group(vcpu);

        /* Drop priority in any case */
        act_prio = __vgic_v3_clear_highest_active_priority();

        /* If EOIing an LPI, no deactivate to be performed */
        if (vid >= VGIC_MIN_LPI)
                return;

        /* EOImode == 1, nothing to be done here */
        if (vmcr & ICH_VMCR_EOIM_MASK)
                return;

        lr = __vgic_v3_find_active_lr(vcpu, vid, &lr_val);
        if (lr == -1) {
                __vgic_v3_bump_eoicount();
                return;
        }

        lr_prio = (lr_val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;

        /* If priorities or group do not match, the guest has fscked-up. */
        if (grp != !!(lr_val & ICH_LR_GROUP) ||
            __vgic_v3_pri_to_pre(lr_prio, vmcr, grp) != act_prio)
                return;

        /* Let's now perform the deactivation */
        __vgic_v3_clear_active_lr(lr, lr_val);
}

static void __hyp_text __vgic_v3_read_igrpen0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
        vcpu_set_reg(vcpu, rt, !!(vmcr & ICH_VMCR_ENG0_MASK));
}

static void __hyp_text __vgic_v3_read_igrpen1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
        vcpu_set_reg(vcpu, rt, !!(vmcr & ICH_VMCR_ENG1_MASK));
}

static void __hyp_text __vgic_v3_write_igrpen0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
        u64 val = vcpu_get_reg(vcpu, rt);

        if (val & 1)
                vmcr |= ICH_VMCR_ENG0_MASK;
        else
                vmcr &= ~ICH_VMCR_ENG0_MASK;

        __vgic_v3_write_vmcr(vmcr);
}

static void __hyp_text __vgic_v3_write_igrpen1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
        u64 val = vcpu_get_reg(vcpu, rt);

        if (val & 1)
                vmcr |= ICH_VMCR_ENG1_MASK;
        else
                vmcr &= ~ICH_VMCR_ENG1_MASK;

        __vgic_v3_write_vmcr(vmcr);
}

static void __hyp_text __vgic_v3_read_bpr0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
        vcpu_set_reg(vcpu, rt, __vgic_v3_get_bpr0(vmcr));
}

static void __hyp_text __vgic_v3_read_bpr1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
        vcpu_set_reg(vcpu, rt, __vgic_v3_get_bpr1(vmcr));
}

static void __hyp_text __vgic_v3_write_bpr0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
        u64 val = vcpu_get_reg(vcpu, rt);
        u8 bpr_min = __vgic_v3_bpr_min() - 1;

        /* Enforce BPR limiting */
        if (val < bpr_min)
                val = bpr_min;

        val <<= ICH_VMCR_BPR0_SHIFT;
        val &= ICH_VMCR_BPR0_MASK;
        vmcr &= ~ICH_VMCR_BPR0_MASK;
        vmcr |= val;

        __vgic_v3_write_vmcr(vmcr);
}

static void __hyp_text __vgic_v3_write_bpr1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
        u64 val = vcpu_get_reg(vcpu, rt);
        u8 bpr_min = __vgic_v3_bpr_min();

        if (vmcr & ICH_VMCR_CBPR_MASK)
                return;

        /* Enforce BPR limiting */
        if (val < bpr_min)
                val = bpr_min;

        val <<= ICH_VMCR_BPR1_SHIFT;
        val &= ICH_VMCR_BPR1_MASK;
        vmcr &= ~ICH_VMCR_BPR1_MASK;
        vmcr |= val;

        __vgic_v3_write_vmcr(vmcr);
}

static void __hyp_text __vgic_v3_read_apxrn(struct kvm_vcpu *vcpu, int rt, int n)
{
        u32 val;

        if (!__vgic_v3_get_group(vcpu))
                val = __vgic_v3_read_ap0rn(n);
        else
                val = __vgic_v3_read_ap1rn(n);

        vcpu_set_reg(vcpu, rt, val);
}

static void __hyp_text __vgic_v3_write_apxrn(struct kvm_vcpu *vcpu, int rt, int n)
{
        u32 val = vcpu_get_reg(vcpu, rt);

        if (!__vgic_v3_get_group(vcpu))
                __vgic_v3_write_ap0rn(val, n);
        else
                __vgic_v3_write_ap1rn(val, n);
}

static void __hyp_text __vgic_v3_read_apxr0(struct kvm_vcpu *vcpu,
                                            u32 vmcr, int rt)
{
        __vgic_v3_read_apxrn(vcpu, rt, 0);
}

static void __hyp_text __vgic_v3_read_apxr1(struct kvm_vcpu *vcpu,
                                            u32 vmcr, int rt)
{
        __vgic_v3_read_apxrn(vcpu, rt, 1);
}

static void __hyp_text __vgic_v3_read_apxr2(struct kvm_vcpu *vcpu,
                                            u32 vmcr, int rt)
{
        __vgic_v3_read_apxrn(vcpu, rt, 2);
}

static void __hyp_text __vgic_v3_read_apxr3(struct kvm_vcpu *vcpu,
                                            u32 vmcr, int rt)
{
        __vgic_v3_read_apxrn(vcpu, rt, 3);
}

static void __hyp_text __vgic_v3_write_apxr0(struct kvm_vcpu *vcpu,
                                             u32 vmcr, int rt)
{
        __vgic_v3_write_apxrn(vcpu, rt, 0);
}

static void __hyp_text __vgic_v3_write_apxr1(struct kvm_vcpu *vcpu,
                                             u32 vmcr, int rt)
{
        __vgic_v3_write_apxrn(vcpu, rt, 1);
}

static void __hyp_text __vgic_v3_write_apxr2(struct kvm_vcpu *vcpu,
                                             u32 vmcr, int rt)
{
        __vgic_v3_write_apxrn(vcpu, rt, 2);
}

static void __hyp_text __vgic_v3_write_apxr3(struct kvm_vcpu *vcpu,
                                             u32 vmcr, int rt)
{
        __vgic_v3_write_apxrn(vcpu, rt, 3);
}

static void __hyp_text __vgic_v3_read_hppir(struct kvm_vcpu *vcpu,
                                            u32 vmcr, int rt)
{
        u64 lr_val;
        int lr, lr_grp, grp;

        grp = __vgic_v3_get_group(vcpu);

        lr = __vgic_v3_highest_priority_lr(vcpu, vmcr, &lr_val);
        if (lr == -1)
                goto spurious;

        lr_grp = !!(lr_val & ICH_LR_GROUP);
        if (lr_grp != grp)
                lr_val = ICC_IAR1_EL1_SPURIOUS;

spurious:
        vcpu_set_reg(vcpu, rt, lr_val & ICH_LR_VIRTUAL_ID_MASK);
}

static void __hyp_text __vgic_v3_read_rpr(struct kvm_vcpu *vcpu,
                                          u32 vmcr, int rt)
{
        u32 val = __vgic_v3_get_highest_active_priority();
        vcpu_set_reg(vcpu, rt, val);
}

static void __hyp_text __vgic_v3_read_ctlr(struct kvm_vcpu *vcpu,
                                           u32 vmcr, int rt)
{
        u32 vtr, val;

        vtr = read_gicreg(ICH_VTR_EL2);
        /* PRIbits */
        val = ((vtr >> 29) & 7) << ICC_CTLR_EL1_PRI_BITS_SHIFT;
        /* IDbits */
        val |= ((vtr >> 23) & 7) << ICC_CTLR_EL1_ID_BITS_SHIFT;
        /* SEIS */
        val |= ((vtr >> 22) & 1) << ICC_CTLR_EL1_SEIS_SHIFT;
        /* A3V */
        val |= ((vtr >> 21) & 1) << ICC_CTLR_EL1_A3V_SHIFT;
        /* EOImode */
        val |= ((vmcr & ICH_VMCR_EOIM_MASK) >> ICH_VMCR_EOIM_SHIFT) << ICC_CTLR_EL1_EOImode_SHIFT;
        /* CBPR */
        val |= (vmcr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT;

        vcpu_set_reg(vcpu, rt, val);
}

static void __hyp_text __vgic_v3_write_ctlr(struct kvm_vcpu *vcpu,
                                            u32 vmcr, int rt)
{
        u32 val = vcpu_get_reg(vcpu, rt);

        if (val & ICC_CTLR_EL1_CBPR_MASK)
                vmcr |= ICH_VMCR_CBPR_MASK;
        else
                vmcr &= ~ICH_VMCR_CBPR_MASK;

        if (val & ICC_CTLR_EL1_EOImode_MASK)
                vmcr |= ICH_VMCR_EOIM_MASK;
        else
                vmcr &= ~ICH_VMCR_EOIM_MASK;

        write_gicreg(vmcr, ICH_VMCR_EL2);
}

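/*
 * Entry point for a trapped guest access to one of the GICv3 ICC_*_EL1
 * system registers (AArch32 cp15 accesses are first mapped to the
 * equivalent sysreg encoding): decode the register from the ESR, pick
 * the matching emulation handler above and run it against the current
 * ICH_VMCR_EL2 value. Returns 1 when the access has been emulated here,
 * 0 when the register is not handled by this code.
 */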
int __hyp_text __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu)
{
        int rt;
        u32 esr;
        u32 vmcr;
        void (*fn)(struct kvm_vcpu *, u32, int);
        bool is_read;
        u32 sysreg;

        esr = kvm_vcpu_get_hsr(vcpu);
        if (vcpu_mode_is_32bit(vcpu)) {
                if (!kvm_condition_valid(vcpu))
                        return 1;

                sysreg = esr_cp15_to_sysreg(esr);
        } else {
                sysreg = esr_sys64_to_sysreg(esr);
        }

        is_read = (esr & ESR_ELx_SYS64_ISS_DIR_MASK) == ESR_ELx_SYS64_ISS_DIR_READ;

        switch (sysreg) {
        case ICC_IAR0_EL1:
        case ICC_IAR1_EL1:
                fn = __vgic_v3_read_iar;
                break;
        case ICC_EOIR0_EL1:
        case ICC_EOIR1_EL1:
                fn = __vgic_v3_write_eoir;
                break;
        case ICC_GRPEN1_EL1:
                if (is_read)
                        fn = __vgic_v3_read_igrpen1;
                else
                        fn = __vgic_v3_write_igrpen1;
                break;
        case ICC_BPR1_EL1:
                if (is_read)
                        fn = __vgic_v3_read_bpr1;
                else
                        fn = __vgic_v3_write_bpr1;
                break;
        case ICC_AP0Rn_EL1(0):
        case ICC_AP1Rn_EL1(0):
                if (is_read)
                        fn = __vgic_v3_read_apxr0;
                else
                        fn = __vgic_v3_write_apxr0;
                break;
        case ICC_AP0Rn_EL1(1):
        case ICC_AP1Rn_EL1(1):
                if (is_read)
                        fn = __vgic_v3_read_apxr1;
                else
                        fn = __vgic_v3_write_apxr1;
                break;
        case ICC_AP0Rn_EL1(2):
        case ICC_AP1Rn_EL1(2):
                if (is_read)
                        fn = __vgic_v3_read_apxr2;
                else
                        fn = __vgic_v3_write_apxr2;
                break;
        case ICC_AP0Rn_EL1(3):
        case ICC_AP1Rn_EL1(3):
                if (is_read)
                        fn = __vgic_v3_read_apxr3;
                else
                        fn = __vgic_v3_write_apxr3;
                break;
        case ICC_HPPIR0_EL1:
        case ICC_HPPIR1_EL1:
                fn = __vgic_v3_read_hppir;
                break;
        case ICC_GRPEN0_EL1:
                if (is_read)
                        fn = __vgic_v3_read_igrpen0;
                else
                        fn = __vgic_v3_write_igrpen0;
                break;
        case ICC_BPR0_EL1:
                if (is_read)
                        fn = __vgic_v3_read_bpr0;
                else
                        fn = __vgic_v3_write_bpr0;
                break;
        case ICC_DIR_EL1:
                fn = __vgic_v3_write_dir;
                break;
        case ICC_RPR_EL1:
                fn = __vgic_v3_read_rpr;
                break;
        case ICC_CTLR_EL1:
                if (is_read)
                        fn = __vgic_v3_read_ctlr;
                else
                        fn = __vgic_v3_write_ctlr;
                break;
        default:
                return 0;
        }

        vmcr = __vgic_v3_read_vmcr();
        rt = kvm_vcpu_sys_get_rt(vcpu);
        fn(vcpu, vmcr, rt);

        return 1;
}

#endif