virt/kvm/arm/hyp/vgic-v3-sr.c
/*
 * Copyright (C) 2012-2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/compiler.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/kvm_host.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>

#define vtr_to_max_lr_idx(v)		((v) & 0xf)
#define vtr_to_nr_pre_bits(v)		((((u32)(v) >> 26) & 7) + 1)
#define vtr_to_nr_apr_regs(v)		(1 << (vtr_to_nr_pre_bits(v) - 5))
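/*
 * Worked example, reading the fields the same way the macros above do:
 * a VTR value with ListRegs == 3 describes four list registers
 * (max_lr_idx == 3), and PREbits == 6 describes seven bits of
 * preemption, i.e. 1 << (7 - 5) == 4 active-priority registers per
 * group; the minimum of five preemption bits handled by this file maps
 * to a single ICH_AP{0,1}R0_EL2 pair.
 */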

static u64 __hyp_text __gic_v3_get_lr(unsigned int lr)
{
	switch (lr & 0xf) {
	case 0:
		return read_gicreg(ICH_LR0_EL2);
	case 1:
		return read_gicreg(ICH_LR1_EL2);
	case 2:
		return read_gicreg(ICH_LR2_EL2);
	case 3:
		return read_gicreg(ICH_LR3_EL2);
	case 4:
		return read_gicreg(ICH_LR4_EL2);
	case 5:
		return read_gicreg(ICH_LR5_EL2);
	case 6:
		return read_gicreg(ICH_LR6_EL2);
	case 7:
		return read_gicreg(ICH_LR7_EL2);
	case 8:
		return read_gicreg(ICH_LR8_EL2);
	case 9:
		return read_gicreg(ICH_LR9_EL2);
	case 10:
		return read_gicreg(ICH_LR10_EL2);
	case 11:
		return read_gicreg(ICH_LR11_EL2);
	case 12:
		return read_gicreg(ICH_LR12_EL2);
	case 13:
		return read_gicreg(ICH_LR13_EL2);
	case 14:
		return read_gicreg(ICH_LR14_EL2);
	case 15:
		return read_gicreg(ICH_LR15_EL2);
	}

	unreachable();
}
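/*
 * The switch statements in __gic_v3_get_lr() above and __gic_v3_set_lr()
 * below are not avoidable: system register names are encoded as
 * immediates in the MRS/MSR instructions, so ICH_LR<n>_EL2 cannot be
 * selected with a run-time index.
 */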

static void __hyp_text __gic_v3_set_lr(u64 val, int lr)
{
	switch (lr & 0xf) {
	case 0:
		write_gicreg(val, ICH_LR0_EL2);
		break;
	case 1:
		write_gicreg(val, ICH_LR1_EL2);
		break;
	case 2:
		write_gicreg(val, ICH_LR2_EL2);
		break;
	case 3:
		write_gicreg(val, ICH_LR3_EL2);
		break;
	case 4:
		write_gicreg(val, ICH_LR4_EL2);
		break;
	case 5:
		write_gicreg(val, ICH_LR5_EL2);
		break;
	case 6:
		write_gicreg(val, ICH_LR6_EL2);
		break;
	case 7:
		write_gicreg(val, ICH_LR7_EL2);
		break;
	case 8:
		write_gicreg(val, ICH_LR8_EL2);
		break;
	case 9:
		write_gicreg(val, ICH_LR9_EL2);
		break;
	case 10:
		write_gicreg(val, ICH_LR10_EL2);
		break;
	case 11:
		write_gicreg(val, ICH_LR11_EL2);
		break;
	case 12:
		write_gicreg(val, ICH_LR12_EL2);
		break;
	case 13:
		write_gicreg(val, ICH_LR13_EL2);
		break;
	case 14:
		write_gicreg(val, ICH_LR14_EL2);
		break;
	case 15:
		write_gicreg(val, ICH_LR15_EL2);
		break;
	}
}

static void __hyp_text save_maint_int_state(struct kvm_vcpu *vcpu, int nr_lr)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
	int i;
	bool expect_mi;

	expect_mi = !!(cpu_if->vgic_hcr & ICH_HCR_UIE);

	for (i = 0; i < nr_lr; i++) {
		if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i)))
			continue;

		expect_mi |= (!(cpu_if->vgic_lr[i] & ICH_LR_HW) &&
			      (cpu_if->vgic_lr[i] & ICH_LR_EOI));
	}

	if (expect_mi) {
		cpu_if->vgic_misr = read_gicreg(ICH_MISR_EL2);

		if (cpu_if->vgic_misr & ICH_MISR_EOI)
			cpu_if->vgic_eisr = read_gicreg(ICH_EISR_EL2);
		else
			cpu_if->vgic_eisr = 0;
	} else {
		cpu_if->vgic_misr = 0;
		cpu_if->vgic_eisr = 0;
	}
}

static void __hyp_text __vgic_v3_write_ap0rn(u32 val, int n)
{
	switch (n) {
	case 0:
		write_gicreg(val, ICH_AP0R0_EL2);
		break;
	case 1:
		write_gicreg(val, ICH_AP0R1_EL2);
		break;
	case 2:
		write_gicreg(val, ICH_AP0R2_EL2);
		break;
	case 3:
		write_gicreg(val, ICH_AP0R3_EL2);
		break;
	}
}

static void __hyp_text __vgic_v3_write_ap1rn(u32 val, int n)
{
	switch (n) {
	case 0:
		write_gicreg(val, ICH_AP1R0_EL2);
		break;
	case 1:
		write_gicreg(val, ICH_AP1R1_EL2);
		break;
	case 2:
		write_gicreg(val, ICH_AP1R2_EL2);
		break;
	case 3:
		write_gicreg(val, ICH_AP1R3_EL2);
		break;
	}
}

static u32 __hyp_text __vgic_v3_read_ap0rn(int n)
{
	u32 val;

	switch (n) {
	case 0:
		val = read_gicreg(ICH_AP0R0_EL2);
		break;
	case 1:
		val = read_gicreg(ICH_AP0R1_EL2);
		break;
	case 2:
		val = read_gicreg(ICH_AP0R2_EL2);
		break;
	case 3:
		val = read_gicreg(ICH_AP0R3_EL2);
		break;
	default:
		unreachable();
	}

	return val;
}

static u32 __hyp_text __vgic_v3_read_ap1rn(int n)
{
	u32 val;

	switch (n) {
	case 0:
		val = read_gicreg(ICH_AP1R0_EL2);
		break;
	case 1:
		val = read_gicreg(ICH_AP1R1_EL2);
		break;
	case 2:
		val = read_gicreg(ICH_AP1R2_EL2);
		break;
	case 3:
		val = read_gicreg(ICH_AP1R3_EL2);
		break;
	default:
		unreachable();
	}

	return val;
}

void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
	u64 val;

	/*
	 * Make sure stores to the GIC via the memory mapped interface
	 * are now visible to the system register interface.
	 */
	if (!cpu_if->vgic_sre)
		dsb(st);

	cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2);

	if (vcpu->arch.vgic_cpu.live_lrs) {
		int i;
		u32 max_lr_idx, nr_pre_bits;

		cpu_if->vgic_elrsr = read_gicreg(ICH_ELSR_EL2);

		write_gicreg(0, ICH_HCR_EL2);
		val = read_gicreg(ICH_VTR_EL2);
		max_lr_idx = vtr_to_max_lr_idx(val);
		nr_pre_bits = vtr_to_nr_pre_bits(val);

		save_maint_int_state(vcpu, max_lr_idx + 1);

		for (i = 0; i <= max_lr_idx; i++) {
			if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i)))
				continue;

			if (cpu_if->vgic_elrsr & (1 << i))
				cpu_if->vgic_lr[i] &= ~ICH_LR_STATE;
			else
				cpu_if->vgic_lr[i] = __gic_v3_get_lr(i);

			__gic_v3_set_lr(0, i);
		}

		switch (nr_pre_bits) {
		case 7:
			cpu_if->vgic_ap0r[3] = __vgic_v3_read_ap0rn(3);
			cpu_if->vgic_ap0r[2] = __vgic_v3_read_ap0rn(2);
		case 6:
			cpu_if->vgic_ap0r[1] = __vgic_v3_read_ap0rn(1);
		default:
			cpu_if->vgic_ap0r[0] = __vgic_v3_read_ap0rn(0);
		}

		switch (nr_pre_bits) {
		case 7:
			cpu_if->vgic_ap1r[3] = __vgic_v3_read_ap1rn(3);
			cpu_if->vgic_ap1r[2] = __vgic_v3_read_ap1rn(2);
		case 6:
			cpu_if->vgic_ap1r[1] = __vgic_v3_read_ap1rn(1);
		default:
			cpu_if->vgic_ap1r[0] = __vgic_v3_read_ap1rn(0);
		}

		vcpu->arch.vgic_cpu.live_lrs = 0;
	} else {
		cpu_if->vgic_misr = 0;
		cpu_if->vgic_eisr = 0;
		cpu_if->vgic_elrsr = 0xffff;
		cpu_if->vgic_ap0r[0] = 0;
		cpu_if->vgic_ap0r[1] = 0;
		cpu_if->vgic_ap0r[2] = 0;
		cpu_if->vgic_ap0r[3] = 0;
		cpu_if->vgic_ap1r[0] = 0;
		cpu_if->vgic_ap1r[1] = 0;
		cpu_if->vgic_ap1r[2] = 0;
		cpu_if->vgic_ap1r[3] = 0;
	}

	val = read_gicreg(ICC_SRE_EL2);
	write_gicreg(val | ICC_SRE_EL2_ENABLE, ICC_SRE_EL2);

	if (!cpu_if->vgic_sre) {
		/* Make sure ENABLE is set at EL2 before setting SRE at EL1 */
		isb();
		write_gicreg(1, ICC_SRE_EL1);
	}
}

void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
	u64 val;
	u32 max_lr_idx, nr_pre_bits;
	u16 live_lrs = 0;
	int i;

	/*
	 * VFIQEn is RES1 if ICC_SRE_EL1.SRE is 1. This causes a
	 * Group0 interrupt (as generated in GICv2 mode) to be
	 * delivered as a FIQ to the guest, with potentially fatal
	 * consequences. So we must make sure that ICC_SRE_EL1 has
	 * been actually programmed with the value we want before
	 * starting to mess with the rest of the GIC.
	 */
	if (!cpu_if->vgic_sre) {
		write_gicreg(0, ICC_SRE_EL1);
		isb();
	}

	val = read_gicreg(ICH_VTR_EL2);
	max_lr_idx = vtr_to_max_lr_idx(val);
	nr_pre_bits = vtr_to_nr_pre_bits(val);

	for (i = 0; i <= max_lr_idx; i++) {
		if (cpu_if->vgic_lr[i] & ICH_LR_STATE)
			live_lrs |= (1 << i);
	}

	write_gicreg(cpu_if->vgic_vmcr, ICH_VMCR_EL2);

	if (live_lrs) {
		write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);

		switch (nr_pre_bits) {
		case 7:
			__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[3], 3);
			__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[2], 2);
		case 6:
			__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[1], 1);
		default:
			__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[0], 0);
		}

		switch (nr_pre_bits) {
		case 7:
			__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[3], 3);
			__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[2], 2);
		case 6:
			__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[1], 1);
		default:
			__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[0], 0);
		}

		for (i = 0; i <= max_lr_idx; i++) {
			if (!(live_lrs & (1 << i)))
				continue;

			__gic_v3_set_lr(cpu_if->vgic_lr[i], i);
		}
	}

	/*
	 * Ensure that the above will have reached the
	 * (re)distributors. This ensures the guest will read the
	 * correct values from the memory-mapped interface.
	 */
	if (!cpu_if->vgic_sre) {
		isb();
		dsb(sy);
	}
	vcpu->arch.vgic_cpu.live_lrs = live_lrs;

	/*
	 * Prevent the guest from touching the GIC system registers if
	 * SRE isn't enabled for GICv3 emulation.
	 */
	write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE,
		     ICC_SRE_EL2);
}

void __hyp_text __vgic_v3_init_lrs(void)
{
	int max_lr_idx = vtr_to_max_lr_idx(read_gicreg(ICH_VTR_EL2));
	int i;

	for (i = 0; i <= max_lr_idx; i++)
		__gic_v3_set_lr(0, i);
}

u64 __hyp_text __vgic_v3_get_ich_vtr_el2(void)
{
	return read_gicreg(ICH_VTR_EL2);
}

u64 __hyp_text __vgic_v3_read_vmcr(void)
{
	return read_gicreg(ICH_VMCR_EL2);
}

void __hyp_text __vgic_v3_write_vmcr(u32 vmcr)
{
	write_gicreg(vmcr, ICH_VMCR_EL2);
}

#ifdef CONFIG_ARM64

static int __hyp_text __vgic_v3_bpr_min(void)
{
	/* See Pseudocode for VPriorityGroup */
	return 8 - vtr_to_nr_pre_bits(read_gicreg(ICH_VTR_EL2));
}
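/*
 * e.g. with the minimum of five preemption bits, __vgic_v3_bpr_min()
 * returns 3: a group-1 BPR of 3 makes bits [7:3] of the priority the
 * group (preemption) priority, i.e. exactly five bits of preemption.
 */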

static int __hyp_text __vgic_v3_get_group(struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_hsr(vcpu);
	u8 crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT;

	return crm != 8;
}
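/*
 * The group-0 flavours of the trapped CPU interface registers
 * (ICC_IAR0_EL1, ICC_EOIR0_EL1, ICC_BPR0_EL1) are encoded with
 * CRm == 8, while ICC_IAR1_EL1, ICC_EOIR1_EL1 and ICC_BPR1_EL1 use
 * CRm == 12, so the CRm field of the syndrome is enough to tell the
 * two groups apart.
 */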

#define GICv3_IDLE_PRIORITY	0xff

static int __hyp_text __vgic_v3_highest_priority_lr(struct kvm_vcpu *vcpu,
						    u32 vmcr,
						    u64 *lr_val)
{
	unsigned int used_lrs = vcpu->arch.vgic_cpu.used_lrs;
	u8 priority = GICv3_IDLE_PRIORITY;
	int i, lr = -1;

	for (i = 0; i < used_lrs; i++) {
		u64 val = __gic_v3_get_lr(i);
		u8 lr_prio = (val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;

		/* Not pending in the state? */
		if ((val & ICH_LR_STATE) != ICH_LR_PENDING_BIT)
			continue;

		/* Group-0 interrupt, but Group-0 disabled? */
		if (!(val & ICH_LR_GROUP) && !(vmcr & ICH_VMCR_ENG0_MASK))
			continue;

		/* Group-1 interrupt, but Group-1 disabled? */
		if ((val & ICH_LR_GROUP) && !(vmcr & ICH_VMCR_ENG1_MASK))
			continue;

		/* Not the highest priority? */
		if (lr_prio >= priority)
			continue;

		/* This is a candidate */
		priority = lr_prio;
		*lr_val = val;
		lr = i;
	}

	if (lr == -1)
		*lr_val = ICC_IAR1_EL1_SPURIOUS;

	return lr;
}

static int __hyp_text __vgic_v3_find_active_lr(struct kvm_vcpu *vcpu,
					       int intid, u64 *lr_val)
{
	unsigned int used_lrs = vcpu->arch.vgic_cpu.used_lrs;
	int i;

	for (i = 0; i < used_lrs; i++) {
		u64 val = __gic_v3_get_lr(i);

		if ((val & ICH_LR_VIRTUAL_ID_MASK) == intid &&
		    (val & ICH_LR_ACTIVE_BIT)) {
			*lr_val = val;
			return i;
		}
	}

	*lr_val = ICC_IAR1_EL1_SPURIOUS;
	return -1;
}

static int __hyp_text __vgic_v3_get_highest_active_priority(void)
{
	u8 nr_apr_regs = vtr_to_nr_apr_regs(read_gicreg(ICH_VTR_EL2));
	u32 hap = 0;
	int i;

	for (i = 0; i < nr_apr_regs; i++) {
		u32 val;

		/*
		 * The ICH_AP0Rn_EL2 and ICH_AP1Rn_EL2 registers
		 * contain the active priority levels for this VCPU
		 * for the maximum number of supported priority
		 * levels, and we return the full priority level only
		 * if the BPR is programmed to its minimum, otherwise
		 * we return a combination of the priority level and
		 * subpriority, as determined by the setting of the
		 * BPR, but without the full subpriority.
		 */
		val = __vgic_v3_read_ap0rn(i);
		val |= __vgic_v3_read_ap1rn(i);
		if (!val) {
			hap += 32;
			continue;
		}

		return (hap + __ffs(val)) << __vgic_v3_bpr_min();
	}

	return GICv3_IDLE_PRIORITY;
}
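/*
 * e.g. with five preemption bits there is a single APR register per
 * group and __vgic_v3_bpr_min() is 3; if AP0R0 | AP1R0 reads 0x4, the
 * lowest set bit is 2 and the highest active priority is reported as
 * 2 << 3 == 0x10.
 */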

static unsigned int __hyp_text __vgic_v3_get_bpr0(u32 vmcr)
{
	return (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT;
}

static unsigned int __hyp_text __vgic_v3_get_bpr1(u32 vmcr)
{
	unsigned int bpr;

	if (vmcr & ICH_VMCR_CBPR_MASK) {
		bpr = __vgic_v3_get_bpr0(vmcr);
		if (bpr < 7)
			bpr++;
	} else {
		bpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT;
	}

	return bpr;
}

/*
 * Convert a priority to a preemption level, taking the relevant BPR
 * into account by zeroing the sub-priority bits.
 */
static u8 __hyp_text __vgic_v3_pri_to_pre(u8 pri, u32 vmcr, int grp)
{
	unsigned int bpr;

	if (!grp)
		bpr = __vgic_v3_get_bpr0(vmcr) + 1;
	else
		bpr = __vgic_v3_get_bpr1(vmcr);

	return pri & (GENMASK(7, 0) << bpr);
}
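/*
 * e.g. for a group-1 interrupt with an effective BPR1 of 3, a priority
 * of 0x9b is masked with 0xff << 3 and becomes the preemption level
 * 0x98; the three sub-priority bits are dropped.
 */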

/*
 * The priority value is independent of any of the BPR values, so we
 * normalize it using the minimal BPR value. This guarantees that no
 * matter what the guest does with its BPR, we can always set/get the
 * same value of a priority.
 */
static void __hyp_text __vgic_v3_set_active_priority(u8 pri, u32 vmcr, int grp)
{
	u8 pre, ap;
	u32 val;
	int apr;

	pre = __vgic_v3_pri_to_pre(pri, vmcr, grp);
	ap = pre >> __vgic_v3_bpr_min();
	apr = ap / 32;

	if (!grp) {
		val = __vgic_v3_read_ap0rn(apr);
		__vgic_v3_write_ap0rn(val | BIT(ap % 32), apr);
	} else {
		val = __vgic_v3_read_ap1rn(apr);
		__vgic_v3_write_ap1rn(val | BIT(ap % 32), apr);
	}
}
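/*
 * e.g. continuing the example above: a group-1 preemption level of 0x98
 * with __vgic_v3_bpr_min() == 3 gives ap == 0x13 (19) and apr == 0, so
 * bit 19 of ICH_AP1R0_EL2 is set to record the newly active priority.
 */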

static int __hyp_text __vgic_v3_clear_highest_active_priority(void)
{
	u8 nr_apr_regs = vtr_to_nr_apr_regs(read_gicreg(ICH_VTR_EL2));
	u32 hap = 0;
	int i;

	for (i = 0; i < nr_apr_regs; i++) {
		u32 ap0, ap1;
		int c0, c1;

		ap0 = __vgic_v3_read_ap0rn(i);
		ap1 = __vgic_v3_read_ap1rn(i);
		if (!ap0 && !ap1) {
			hap += 32;
			continue;
		}

		c0 = ap0 ? __ffs(ap0) : 32;
		c1 = ap1 ? __ffs(ap1) : 32;

		/* Always clear the LSB, which is the highest priority */
		if (c0 < c1) {
			ap0 &= ~BIT(c0);
			__vgic_v3_write_ap0rn(ap0, i);
			hap += c0;
		} else {
			ap1 &= ~BIT(c1);
			__vgic_v3_write_ap1rn(ap1, i);
			hap += c1;
		}

		/* Rescale to 8 bits of priority */
		return hap << __vgic_v3_bpr_min();
	}

	return GICv3_IDLE_PRIORITY;
}

static void __hyp_text __vgic_v3_read_iar(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 lr_val;
	u8 lr_prio, pmr;
	int lr, grp;

	grp = __vgic_v3_get_group(vcpu);

	lr = __vgic_v3_highest_priority_lr(vcpu, vmcr, &lr_val);
	if (lr < 0)
		goto spurious;

	if (grp != !!(lr_val & ICH_LR_GROUP))
		goto spurious;

	pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT;
	lr_prio = (lr_val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;
	if (pmr <= lr_prio)
		goto spurious;

	if (__vgic_v3_get_highest_active_priority() <= __vgic_v3_pri_to_pre(lr_prio, vmcr, grp))
		goto spurious;

	lr_val &= ~ICH_LR_STATE;
	/* No active state for LPIs */
	if ((lr_val & ICH_LR_VIRTUAL_ID_MASK) <= VGIC_MAX_SPI)
		lr_val |= ICH_LR_ACTIVE_BIT;
	__gic_v3_set_lr(lr_val, lr);
	__vgic_v3_set_active_priority(lr_prio, vmcr, grp);
	vcpu_set_reg(vcpu, rt, lr_val & ICH_LR_VIRTUAL_ID_MASK);
	return;

spurious:
	vcpu_set_reg(vcpu, rt, ICC_IAR1_EL1_SPURIOUS);
}

static void __hyp_text __vgic_v3_clear_active_lr(int lr, u64 lr_val)
{
	lr_val &= ~ICH_LR_ACTIVE_BIT;
	if (lr_val & ICH_LR_HW) {
		u32 pid;

		pid = (lr_val & ICH_LR_PHYS_ID_MASK) >> ICH_LR_PHYS_ID_SHIFT;
		gic_write_dir(pid);
	}

	__gic_v3_set_lr(lr_val, lr);
}

static void __hyp_text __vgic_v3_bump_eoicount(void)
{
	u32 hcr;

	hcr = read_gicreg(ICH_HCR_EL2);
	hcr += 1 << ICH_HCR_EOIcount_SHIFT;
	write_gicreg(hcr, ICH_HCR_EL2);
}

static void __hyp_text __vgic_v3_write_eoir(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u32 vid = vcpu_get_reg(vcpu, rt);
	u64 lr_val;
	u8 lr_prio, act_prio;
	int lr, grp;

	grp = __vgic_v3_get_group(vcpu);

	/* Drop priority in any case */
	act_prio = __vgic_v3_clear_highest_active_priority();

	/* If EOIing an LPI, no deactivate to be performed */
	if (vid >= VGIC_MIN_LPI)
		return;

	/* EOImode == 1, nothing to be done here */
	if (vmcr & ICH_VMCR_EOIM_MASK)
		return;

	lr = __vgic_v3_find_active_lr(vcpu, vid, &lr_val);
	if (lr == -1) {
		__vgic_v3_bump_eoicount();
		return;
	}

	lr_prio = (lr_val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;

	/* If priorities or group do not match, the guest has fscked-up. */
	if (grp != !!(lr_val & ICH_LR_GROUP) ||
	    __vgic_v3_pri_to_pre(lr_prio, vmcr, grp) != act_prio)
		return;

	/* Let's now perform the deactivation */
	__vgic_v3_clear_active_lr(lr, lr_val);
}

static void __hyp_text __vgic_v3_read_igrpen1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	vcpu_set_reg(vcpu, rt, !!(vmcr & ICH_VMCR_ENG1_MASK));
}

static void __hyp_text __vgic_v3_write_igrpen1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 val = vcpu_get_reg(vcpu, rt);

	if (val & 1)
		vmcr |= ICH_VMCR_ENG1_MASK;
	else
		vmcr &= ~ICH_VMCR_ENG1_MASK;

	__vgic_v3_write_vmcr(vmcr);
}

static void __hyp_text __vgic_v3_read_bpr1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	vcpu_set_reg(vcpu, rt, __vgic_v3_get_bpr1(vmcr));
}

static void __hyp_text __vgic_v3_write_bpr1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 val = vcpu_get_reg(vcpu, rt);
	u8 bpr_min = __vgic_v3_bpr_min();

	if (vmcr & ICH_VMCR_CBPR_MASK)
		return;

	/* Enforce BPR limiting */
	if (val < bpr_min)
		val = bpr_min;

	val <<= ICH_VMCR_BPR1_SHIFT;
	val &= ICH_VMCR_BPR1_MASK;
	vmcr &= ~ICH_VMCR_BPR1_MASK;
	vmcr |= val;

	__vgic_v3_write_vmcr(vmcr);
}

int __hyp_text __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu)
{
	int rt;
	u32 esr;
	u32 vmcr;
	void (*fn)(struct kvm_vcpu *, u32, int);
	bool is_read;
	u32 sysreg;

	esr = kvm_vcpu_get_hsr(vcpu);
	if (vcpu_mode_is_32bit(vcpu)) {
		if (!kvm_condition_valid(vcpu))
			return 1;

		sysreg = esr_cp15_to_sysreg(esr);
	} else {
		sysreg = esr_sys64_to_sysreg(esr);
	}

	is_read = (esr & ESR_ELx_SYS64_ISS_DIR_MASK) == ESR_ELx_SYS64_ISS_DIR_READ;

	switch (sysreg) {
	case ICC_IAR1_EL1:
		fn = __vgic_v3_read_iar;
		break;
	case ICC_EOIR1_EL1:
		fn = __vgic_v3_write_eoir;
		break;
	case ICC_GRPEN1_EL1:
		if (is_read)
			fn = __vgic_v3_read_igrpen1;
		else
			fn = __vgic_v3_write_igrpen1;
		break;
	case ICC_BPR1_EL1:
		if (is_read)
			fn = __vgic_v3_read_bpr1;
		else
			fn = __vgic_v3_write_bpr1;
		break;
	default:
		return 0;
	}

	vmcr = __vgic_v3_read_vmcr();
	rt = kvm_vcpu_sys_get_rt(vcpu);
	fn(vcpu, vmcr, rt);

	return 1;
}

#endif