virt/kvm/arm/hyp/vgic-v3-sr.c (KVM: arm64: vgic-v3: Add ICV_IAR1_EL1 handler)

/*
 * Copyright (C) 2012-2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/compiler.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/kvm_host.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>

#define vtr_to_max_lr_idx(v)		((v) & 0xf)
#define vtr_to_nr_pre_bits(v)		((((u32)(v) >> 26) & 7) + 1)
#define vtr_to_nr_apr_regs(v)		(1 << (vtr_to_nr_pre_bits(v) - 5))
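
/*
 * A note on the vtr_to_*() helpers above: they decode fields of
 * ICH_VTR_EL2. The low bits give the index of the last implemented
 * List Register, and PREbits (bits [28:26]) encodes the number of
 * implemented preemption priority bits minus one. Each active
 * priority register tracks 32 priority groups, so, for example,
 * 7 preemption bits imply 1 << (7 - 5) == 4 ICH_AP{0,1}Rn_EL2
 * registers per group.
 */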

static u64 __hyp_text __gic_v3_get_lr(unsigned int lr)
{
	switch (lr & 0xf) {
	case 0:
		return read_gicreg(ICH_LR0_EL2);
	case 1:
		return read_gicreg(ICH_LR1_EL2);
	case 2:
		return read_gicreg(ICH_LR2_EL2);
	case 3:
		return read_gicreg(ICH_LR3_EL2);
	case 4:
		return read_gicreg(ICH_LR4_EL2);
	case 5:
		return read_gicreg(ICH_LR5_EL2);
	case 6:
		return read_gicreg(ICH_LR6_EL2);
	case 7:
		return read_gicreg(ICH_LR7_EL2);
	case 8:
		return read_gicreg(ICH_LR8_EL2);
	case 9:
		return read_gicreg(ICH_LR9_EL2);
	case 10:
		return read_gicreg(ICH_LR10_EL2);
	case 11:
		return read_gicreg(ICH_LR11_EL2);
	case 12:
		return read_gicreg(ICH_LR12_EL2);
	case 13:
		return read_gicreg(ICH_LR13_EL2);
	case 14:
		return read_gicreg(ICH_LR14_EL2);
	case 15:
		return read_gicreg(ICH_LR15_EL2);
	}

	unreachable();
}

static void __hyp_text __gic_v3_set_lr(u64 val, int lr)
{
	switch (lr & 0xf) {
	case 0:
		write_gicreg(val, ICH_LR0_EL2);
		break;
	case 1:
		write_gicreg(val, ICH_LR1_EL2);
		break;
	case 2:
		write_gicreg(val, ICH_LR2_EL2);
		break;
	case 3:
		write_gicreg(val, ICH_LR3_EL2);
		break;
	case 4:
		write_gicreg(val, ICH_LR4_EL2);
		break;
	case 5:
		write_gicreg(val, ICH_LR5_EL2);
		break;
	case 6:
		write_gicreg(val, ICH_LR6_EL2);
		break;
	case 7:
		write_gicreg(val, ICH_LR7_EL2);
		break;
	case 8:
		write_gicreg(val, ICH_LR8_EL2);
		break;
	case 9:
		write_gicreg(val, ICH_LR9_EL2);
		break;
	case 10:
		write_gicreg(val, ICH_LR10_EL2);
		break;
	case 11:
		write_gicreg(val, ICH_LR11_EL2);
		break;
	case 12:
		write_gicreg(val, ICH_LR12_EL2);
		break;
	case 13:
		write_gicreg(val, ICH_LR13_EL2);
		break;
	case 14:
		write_gicreg(val, ICH_LR14_EL2);
		break;
	case 15:
		write_gicreg(val, ICH_LR15_EL2);
		break;
	}
}
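
/*
 * The List Registers are individual system registers whose encoding
 * is fixed in the mrs/msr instruction, so they cannot be indexed by a
 * runtime value; hence the switch-based accessors above (the same
 * applies to the ICH_AP{0,1}Rn_EL2 accessors further down).
 */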

static void __hyp_text save_maint_int_state(struct kvm_vcpu *vcpu, int nr_lr)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
	int i;
	bool expect_mi;

	expect_mi = !!(cpu_if->vgic_hcr & ICH_HCR_UIE);

	for (i = 0; i < nr_lr; i++) {
		if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i)))
			continue;

		expect_mi |= (!(cpu_if->vgic_lr[i] & ICH_LR_HW) &&
			      (cpu_if->vgic_lr[i] & ICH_LR_EOI));
	}

	if (expect_mi) {
		cpu_if->vgic_misr = read_gicreg(ICH_MISR_EL2);

		if (cpu_if->vgic_misr & ICH_MISR_EOI)
			cpu_if->vgic_eisr = read_gicreg(ICH_EISR_EL2);
		else
			cpu_if->vgic_eisr = 0;
	} else {
		cpu_if->vgic_misr = 0;
		cpu_if->vgic_eisr = 0;
	}
}
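
/*
 * Rationale for the expect_mi computation above: a maintenance
 * interrupt can only be pending if underflow reporting is enabled
 * (ICH_HCR_UIE) or if at least one live LR carries a purely virtual
 * (non-HW) interrupt with EOI maintenance requested, so MISR/EISR are
 * only read when one of those conditions holds.
 */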

static void __hyp_text __vgic_v3_write_ap0rn(u32 val, int n)
{
	switch (n) {
	case 0:
		write_gicreg(val, ICH_AP0R0_EL2);
		break;
	case 1:
		write_gicreg(val, ICH_AP0R1_EL2);
		break;
	case 2:
		write_gicreg(val, ICH_AP0R2_EL2);
		break;
	case 3:
		write_gicreg(val, ICH_AP0R3_EL2);
		break;
	}
}

static void __hyp_text __vgic_v3_write_ap1rn(u32 val, int n)
{
	switch (n) {
	case 0:
		write_gicreg(val, ICH_AP1R0_EL2);
		break;
	case 1:
		write_gicreg(val, ICH_AP1R1_EL2);
		break;
	case 2:
		write_gicreg(val, ICH_AP1R2_EL2);
		break;
	case 3:
		write_gicreg(val, ICH_AP1R3_EL2);
		break;
	}
}

static u32 __hyp_text __vgic_v3_read_ap0rn(int n)
{
	u32 val;

	switch (n) {
	case 0:
		val = read_gicreg(ICH_AP0R0_EL2);
		break;
	case 1:
		val = read_gicreg(ICH_AP0R1_EL2);
		break;
	case 2:
		val = read_gicreg(ICH_AP0R2_EL2);
		break;
	case 3:
		val = read_gicreg(ICH_AP0R3_EL2);
		break;
	default:
		unreachable();
	}

	return val;
}

static u32 __hyp_text __vgic_v3_read_ap1rn(int n)
{
	u32 val;

	switch (n) {
	case 0:
		val = read_gicreg(ICH_AP1R0_EL2);
		break;
	case 1:
		val = read_gicreg(ICH_AP1R1_EL2);
		break;
	case 2:
		val = read_gicreg(ICH_AP1R2_EL2);
		break;
	case 3:
		val = read_gicreg(ICH_AP1R3_EL2);
		break;
	default:
		unreachable();
	}

	return val;
}

void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
	u64 val;

	/*
	 * Make sure stores to the GIC via the memory mapped interface
	 * are now visible to the system register interface.
	 */
	if (!cpu_if->vgic_sre)
		dsb(st);

	cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2);

	if (vcpu->arch.vgic_cpu.live_lrs) {
		int i;
		u32 max_lr_idx, nr_pre_bits;

		cpu_if->vgic_elrsr = read_gicreg(ICH_ELSR_EL2);

		write_gicreg(0, ICH_HCR_EL2);
		val = read_gicreg(ICH_VTR_EL2);
		max_lr_idx = vtr_to_max_lr_idx(val);
		nr_pre_bits = vtr_to_nr_pre_bits(val);

		save_maint_int_state(vcpu, max_lr_idx + 1);

		for (i = 0; i <= max_lr_idx; i++) {
			if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i)))
				continue;

			if (cpu_if->vgic_elrsr & (1 << i))
				cpu_if->vgic_lr[i] &= ~ICH_LR_STATE;
			else
				cpu_if->vgic_lr[i] = __gic_v3_get_lr(i);

			__gic_v3_set_lr(0, i);
		}

		switch (nr_pre_bits) {
		case 7:
			cpu_if->vgic_ap0r[3] = __vgic_v3_read_ap0rn(3);
			cpu_if->vgic_ap0r[2] = __vgic_v3_read_ap0rn(2);
		case 6:
			cpu_if->vgic_ap0r[1] = __vgic_v3_read_ap0rn(1);
		default:
			cpu_if->vgic_ap0r[0] = __vgic_v3_read_ap0rn(0);
		}

		switch (nr_pre_bits) {
		case 7:
			cpu_if->vgic_ap1r[3] = __vgic_v3_read_ap1rn(3);
			cpu_if->vgic_ap1r[2] = __vgic_v3_read_ap1rn(2);
		case 6:
			cpu_if->vgic_ap1r[1] = __vgic_v3_read_ap1rn(1);
		default:
			cpu_if->vgic_ap1r[0] = __vgic_v3_read_ap1rn(0);
		}

		vcpu->arch.vgic_cpu.live_lrs = 0;
	} else {
		cpu_if->vgic_misr = 0;
		cpu_if->vgic_eisr = 0;
		cpu_if->vgic_elrsr = 0xffff;
		cpu_if->vgic_ap0r[0] = 0;
		cpu_if->vgic_ap0r[1] = 0;
		cpu_if->vgic_ap0r[2] = 0;
		cpu_if->vgic_ap0r[3] = 0;
		cpu_if->vgic_ap1r[0] = 0;
		cpu_if->vgic_ap1r[1] = 0;
		cpu_if->vgic_ap1r[2] = 0;
		cpu_if->vgic_ap1r[3] = 0;
	}

	val = read_gicreg(ICC_SRE_EL2);
	write_gicreg(val | ICC_SRE_EL2_ENABLE, ICC_SRE_EL2);

	if (!cpu_if->vgic_sre) {
		/* Make sure ENABLE is set at EL2 before setting SRE at EL1 */
		isb();
		write_gicreg(1, ICC_SRE_EL1);
	}
}

void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
	u64 val;
	u32 max_lr_idx, nr_pre_bits;
	u16 live_lrs = 0;
	int i;

	/*
	 * VFIQEn is RES1 if ICC_SRE_EL1.SRE is 1. This causes a
	 * Group0 interrupt (as generated in GICv2 mode) to be
	 * delivered as a FIQ to the guest, with potentially fatal
	 * consequences. So we must make sure that ICC_SRE_EL1 has
	 * been actually programmed with the value we want before
	 * starting to mess with the rest of the GIC.
	 */
	if (!cpu_if->vgic_sre) {
		write_gicreg(0, ICC_SRE_EL1);
		isb();
	}

	val = read_gicreg(ICH_VTR_EL2);
	max_lr_idx = vtr_to_max_lr_idx(val);
	nr_pre_bits = vtr_to_nr_pre_bits(val);

	for (i = 0; i <= max_lr_idx; i++) {
		if (cpu_if->vgic_lr[i] & ICH_LR_STATE)
			live_lrs |= (1 << i);
	}

	write_gicreg(cpu_if->vgic_vmcr, ICH_VMCR_EL2);

	if (live_lrs) {
		write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);

		switch (nr_pre_bits) {
		case 7:
			__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[3], 3);
			__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[2], 2);
		case 6:
			__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[1], 1);
		default:
			__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[0], 0);
		}

		switch (nr_pre_bits) {
		case 7:
			__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[3], 3);
			__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[2], 2);
		case 6:
			__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[1], 1);
		default:
			__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[0], 0);
		}

		for (i = 0; i <= max_lr_idx; i++) {
			if (!(live_lrs & (1 << i)))
				continue;

			__gic_v3_set_lr(cpu_if->vgic_lr[i], i);
		}
	}

	/*
	 * Ensure that the above will have reached the
	 * (re)distributors. This ensures the guest will read the
	 * correct values from the memory-mapped interface.
	 */
	if (!cpu_if->vgic_sre) {
		isb();
		dsb(sy);
	}
	vcpu->arch.vgic_cpu.live_lrs = live_lrs;

	/*
	 * Prevent the guest from touching the GIC system registers if
	 * SRE isn't enabled for GICv3 emulation.
	 */
	write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE,
		     ICC_SRE_EL2);
}

void __hyp_text __vgic_v3_init_lrs(void)
{
	int max_lr_idx = vtr_to_max_lr_idx(read_gicreg(ICH_VTR_EL2));
	int i;

	for (i = 0; i <= max_lr_idx; i++)
		__gic_v3_set_lr(0, i);
}

u64 __hyp_text __vgic_v3_get_ich_vtr_el2(void)
{
	return read_gicreg(ICH_VTR_EL2);
}

u64 __hyp_text __vgic_v3_read_vmcr(void)
{
	return read_gicreg(ICH_VMCR_EL2);
}

void __hyp_text __vgic_v3_write_vmcr(u32 vmcr)
{
	write_gicreg(vmcr, ICH_VMCR_EL2);
}

#ifdef CONFIG_ARM64

static int __hyp_text __vgic_v3_bpr_min(void)
{
	/* See Pseudocode for VPriorityGroup */
	return 8 - vtr_to_nr_pre_bits(read_gicreg(ICH_VTR_EL2));
}
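
/*
 * With 7 preemption bits, for example, __vgic_v3_bpr_min() returns 1,
 * the smallest binary point value the virtual interface can use;
 * guest writes of a smaller BPR are clamped to this value (see
 * __vgic_v3_write_bpr1() below).
 */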

static int __hyp_text __vgic_v3_get_group(struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_hsr(vcpu);
	u8 crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT;

	return crm != 8;
}
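
/*
 * The group is derived from the trapped encoding: the Group-0
 * accessors (ICC_IAR0_EL1 and friends) sit at CRm == 8, while their
 * Group-1 counterparts use other CRm values, so CRm != 8 identifies a
 * Group-1 access.
 */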

#define GICv3_IDLE_PRIORITY	0xff

static int __hyp_text __vgic_v3_highest_priority_lr(struct kvm_vcpu *vcpu,
						    u32 vmcr,
						    u64 *lr_val)
{
	unsigned int used_lrs = vcpu->arch.vgic_cpu.used_lrs;
	u8 priority = GICv3_IDLE_PRIORITY;
	int i, lr = -1;

	for (i = 0; i < used_lrs; i++) {
		u64 val = __gic_v3_get_lr(i);
		u8 lr_prio = (val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;

		/* Not pending in the state? */
		if ((val & ICH_LR_STATE) != ICH_LR_PENDING_BIT)
			continue;

		/* Group-0 interrupt, but Group-0 disabled? */
		if (!(val & ICH_LR_GROUP) && !(vmcr & ICH_VMCR_ENG0_MASK))
			continue;

		/* Group-1 interrupt, but Group-1 disabled? */
		if ((val & ICH_LR_GROUP) && !(vmcr & ICH_VMCR_ENG1_MASK))
			continue;

		/* Not the highest priority? */
		if (lr_prio >= priority)
			continue;

		/* This is a candidate */
		priority = lr_prio;
		*lr_val = val;
		lr = i;
	}

	if (lr == -1)
		*lr_val = ICC_IAR1_EL1_SPURIOUS;

	return lr;
}

static int __hyp_text __vgic_v3_get_highest_active_priority(void)
{
	u8 nr_apr_regs = vtr_to_nr_apr_regs(read_gicreg(ICH_VTR_EL2));
	u32 hap = 0;
	int i;

	for (i = 0; i < nr_apr_regs; i++) {
		u32 val;

		/*
		 * The ICH_AP0Rn_EL2 and ICH_AP1Rn_EL2 registers
		 * contain the active priority levels for this VCPU
		 * for the maximum number of supported priority
		 * levels, and we return the full priority level only
		 * if the BPR is programmed to its minimum, otherwise
		 * we return a combination of the priority level and
		 * subpriority, as determined by the setting of the
		 * BPR, but without the full subpriority.
		 */
		val  = __vgic_v3_read_ap0rn(i);
		val |= __vgic_v3_read_ap1rn(i);
		if (!val) {
			hap += 32;
			continue;
		}

		return (hap + __ffs(val)) << __vgic_v3_bpr_min();
	}

	return GICv3_IDLE_PRIORITY;
}
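
/*
 * Worked example for the function above: with 7 preemption bits
 * (__vgic_v3_bpr_min() == 1) and only bit 3 set in ICH_AP1R0_EL2,
 * hap == 0 and __ffs(val) == 3, so the function returns 3 << 1 == 6,
 * i.e. the active priority with its sub-priority bit cleared.
 */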

static unsigned int __hyp_text __vgic_v3_get_bpr0(u32 vmcr)
{
	return (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT;
}

static unsigned int __hyp_text __vgic_v3_get_bpr1(u32 vmcr)
{
	unsigned int bpr;

	if (vmcr & ICH_VMCR_CBPR_MASK) {
		bpr = __vgic_v3_get_bpr0(vmcr);
		if (bpr < 7)
			bpr++;
	} else {
		bpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT;
	}

	return bpr;
}
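
/*
 * When ICH_VMCR_EL2.VCBPR is set, the Group-1 binary point is the
 * Group-0 one plus one, saturated at 7, which is what the CBPR branch
 * above implements; this mirrors the architected behaviour of
 * ICC_BPR1_EL1 reads when CBPR is in effect.
 */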

/*
 * Convert a priority to a preemption level, taking the relevant BPR
 * into account by zeroing the sub-priority bits.
 */
static u8 __hyp_text __vgic_v3_pri_to_pre(u8 pri, u32 vmcr, int grp)
{
	unsigned int bpr;

	if (!grp)
		bpr = __vgic_v3_get_bpr0(vmcr) + 1;
	else
		bpr = __vgic_v3_get_bpr1(vmcr);

	return pri & (GENMASK(7, 0) << bpr);
}
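
/*
 * Example: for a Group-1 interrupt with BPR1 == 3, the mask above
 * keeps bits [7:3], so a priority of 0x2d maps to a preemption level
 * of 0x28.
 */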

/*
 * The priority value is independent of any of the BPR values, so we
 * normalize it using the minimal BPR value. This guarantees that no
 * matter what the guest does with its BPR, we can always set/get the
 * same value of a priority.
 */
static void __hyp_text __vgic_v3_set_active_priority(u8 pri, u32 vmcr, int grp)
{
	u8 pre, ap;
	u32 val;
	int apr;

	pre = __vgic_v3_pri_to_pre(pri, vmcr, grp);
	ap = pre >> __vgic_v3_bpr_min();
	apr = ap / 32;

	if (!grp) {
		val = __vgic_v3_read_ap0rn(apr);
		__vgic_v3_write_ap0rn(val | BIT(ap % 32), apr);
	} else {
		val = __vgic_v3_read_ap1rn(apr);
		__vgic_v3_write_ap1rn(val | BIT(ap % 32), apr);
	}
}
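
/*
 * Each active priority register covers 32 priority groups, hence
 * apr = ap / 32 selects the register and ap % 32 the bit within it.
 * Continuing the example above: a preemption level of 0x28 with a
 * minimum BPR of 1 gives ap == 0x14, i.e. bit 20 of ICH_AP{0,1}R0_EL2.
 */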

static void __hyp_text __vgic_v3_read_iar(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 lr_val;
	u8 lr_prio, pmr;
	int lr, grp;

	grp = __vgic_v3_get_group(vcpu);

	lr = __vgic_v3_highest_priority_lr(vcpu, vmcr, &lr_val);
	if (lr < 0)
		goto spurious;

	if (grp != !!(lr_val & ICH_LR_GROUP))
		goto spurious;

	pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT;
	lr_prio = (lr_val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;
	if (pmr <= lr_prio)
		goto spurious;

	if (__vgic_v3_get_highest_active_priority() <= __vgic_v3_pri_to_pre(lr_prio, vmcr, grp))
		goto spurious;

	lr_val &= ~ICH_LR_STATE;
	/* No active state for LPIs */
	if ((lr_val & ICH_LR_VIRTUAL_ID_MASK) <= VGIC_MAX_SPI)
		lr_val |= ICH_LR_ACTIVE_BIT;
	__gic_v3_set_lr(lr_val, lr);
	__vgic_v3_set_active_priority(lr_prio, vmcr, grp);
	vcpu_set_reg(vcpu, rt, lr_val & ICH_LR_VIRTUAL_ID_MASK);
	return;

spurious:
	vcpu_set_reg(vcpu, rt, ICC_IAR1_EL1_SPURIOUS);
}
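
/*
 * The sequence above loosely follows the ICC_IAR1_EL1 pseudocode: the
 * highest priority pending LR must match the group being acknowledged,
 * have that group enabled, and have a priority value lower than both
 * the PMR and the current highest active priority. Only then is it
 * flipped from pending to active (except for LPIs, which have no
 * active state), its preemption level recorded in the AP registers,
 * and its INTID returned to the guest.
 */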

static void __hyp_text __vgic_v3_read_igrpen1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	vcpu_set_reg(vcpu, rt, !!(vmcr & ICH_VMCR_ENG1_MASK));
}

static void __hyp_text __vgic_v3_write_igrpen1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 val = vcpu_get_reg(vcpu, rt);

	if (val & 1)
		vmcr |= ICH_VMCR_ENG1_MASK;
	else
		vmcr &= ~ICH_VMCR_ENG1_MASK;

	__vgic_v3_write_vmcr(vmcr);
}

static void __hyp_text __vgic_v3_read_bpr1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	vcpu_set_reg(vcpu, rt, __vgic_v3_get_bpr1(vmcr));
}

static void __hyp_text __vgic_v3_write_bpr1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 val = vcpu_get_reg(vcpu, rt);
	u8 bpr_min = __vgic_v3_bpr_min();

	if (vmcr & ICH_VMCR_CBPR_MASK)
		return;

	/* Enforce BPR limiting */
	if (val < bpr_min)
		val = bpr_min;

	val <<= ICH_VMCR_BPR1_SHIFT;
	val &= ICH_VMCR_BPR1_MASK;
	vmcr &= ~ICH_VMCR_BPR1_MASK;
	vmcr |= val;

	__vgic_v3_write_vmcr(vmcr);
}

int __hyp_text __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu)
{
	int rt;
	u32 esr;
	u32 vmcr;
	void (*fn)(struct kvm_vcpu *, u32, int);
	bool is_read;
	u32 sysreg;

	esr = kvm_vcpu_get_hsr(vcpu);
	if (vcpu_mode_is_32bit(vcpu)) {
		if (!kvm_condition_valid(vcpu))
			return 1;

		sysreg = esr_cp15_to_sysreg(esr);
	} else {
		sysreg = esr_sys64_to_sysreg(esr);
	}

	is_read = (esr & ESR_ELx_SYS64_ISS_DIR_MASK) == ESR_ELx_SYS64_ISS_DIR_READ;

	switch (sysreg) {
	case ICC_IAR1_EL1:
		fn = __vgic_v3_read_iar;
		break;
	case ICC_GRPEN1_EL1:
		if (is_read)
			fn = __vgic_v3_read_igrpen1;
		else
			fn = __vgic_v3_write_igrpen1;
		break;
	case ICC_BPR1_EL1:
		if (is_read)
			fn = __vgic_v3_read_bpr1;
		else
			fn = __vgic_v3_write_bpr1;
		break;
	default:
		return 0;
	}

	vmcr = __vgic_v3_read_vmcr();
	rt = kvm_vcpu_sys_get_rt(vcpu);
	fn(vcpu, vmcr, rt);

	return 1;
}
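
/*
 * Return value convention: 1 means the access was emulated here (or
 * deliberately skipped because its condition check failed) and the
 * guest can be resumed; 0 means this register is not handled on this
 * path and the trap is left to the rest of KVM.
 */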

#endif