]>
Commit | Line | Data |
---|---|---|
f68d2b1b MZ |
1 | /* |
2 | * Copyright (C) 2012-2015 - ARM Ltd | |
3 | * Author: Marc Zyngier <marc.zyngier@arm.com> | |
4 | * | |
5 | * This program is free software; you can redistribute it and/or modify | |
6 | * it under the terms of the GNU General Public License version 2 as | |
7 | * published by the Free Software Foundation. | |
8 | * | |
9 | * This program is distributed in the hope that it will be useful, | |
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
12 | * GNU General Public License for more details. | |
13 | * | |
14 | * You should have received a copy of the GNU General Public License | |
15 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | |
16 | */ | |
17 | ||
18 | #include <linux/compiler.h> | |
19 | #include <linux/irqchip/arm-gic-v3.h> | |
20 | #include <linux/kvm_host.h> | |
21 | ||
59da1cbf | 22 | #include <asm/kvm_emulate.h> |
13720a56 | 23 | #include <asm/kvm_hyp.h> |
f68d2b1b MZ |
24 | |
25 | #define vtr_to_max_lr_idx(v) ((v) & 0xf) | |
d68356cc | 26 | #define vtr_to_nr_pre_bits(v) ((((u32)(v) >> 26) & 7) + 1) |
132a324a | 27 | #define vtr_to_nr_apr_regs(v) (1 << (vtr_to_nr_pre_bits(v) - 5)) |
f68d2b1b | 28 | |
1b8e83c0 MZ |
/*
 * Read List Register n (ICH_LR<n>_EL2).
 *
 * System register names must be compile-time immediates, so the index
 * has to be demultiplexed through a switch rather than an array lookup.
 */
static u64 __hyp_text __gic_v3_get_lr(unsigned int lr)
{
	switch (lr & 0xf) {
	case 0:
		return read_gicreg(ICH_LR0_EL2);
	case 1:
		return read_gicreg(ICH_LR1_EL2);
	case 2:
		return read_gicreg(ICH_LR2_EL2);
	case 3:
		return read_gicreg(ICH_LR3_EL2);
	case 4:
		return read_gicreg(ICH_LR4_EL2);
	case 5:
		return read_gicreg(ICH_LR5_EL2);
	case 6:
		return read_gicreg(ICH_LR6_EL2);
	case 7:
		return read_gicreg(ICH_LR7_EL2);
	case 8:
		return read_gicreg(ICH_LR8_EL2);
	case 9:
		return read_gicreg(ICH_LR9_EL2);
	case 10:
		return read_gicreg(ICH_LR10_EL2);
	case 11:
		return read_gicreg(ICH_LR11_EL2);
	case 12:
		return read_gicreg(ICH_LR12_EL2);
	case 13:
		return read_gicreg(ICH_LR13_EL2);
	case 14:
		return read_gicreg(ICH_LR14_EL2);
	case 15:
		return read_gicreg(ICH_LR15_EL2);
	}

	/* Masking with 0xf above makes any other value impossible */
	unreachable();
}
68 | ||
/*
 * Write @val to List Register n (ICH_LR<n>_EL2).
 *
 * Mirror of __gic_v3_get_lr(): sysreg names are immediates, hence the
 * switch-based demultiplexing on the (masked) index.
 */
static void __hyp_text __gic_v3_set_lr(u64 val, int lr)
{
	switch (lr & 0xf) {
	case 0:
		write_gicreg(val, ICH_LR0_EL2);
		break;
	case 1:
		write_gicreg(val, ICH_LR1_EL2);
		break;
	case 2:
		write_gicreg(val, ICH_LR2_EL2);
		break;
	case 3:
		write_gicreg(val, ICH_LR3_EL2);
		break;
	case 4:
		write_gicreg(val, ICH_LR4_EL2);
		break;
	case 5:
		write_gicreg(val, ICH_LR5_EL2);
		break;
	case 6:
		write_gicreg(val, ICH_LR6_EL2);
		break;
	case 7:
		write_gicreg(val, ICH_LR7_EL2);
		break;
	case 8:
		write_gicreg(val, ICH_LR8_EL2);
		break;
	case 9:
		write_gicreg(val, ICH_LR9_EL2);
		break;
	case 10:
		write_gicreg(val, ICH_LR10_EL2);
		break;
	case 11:
		write_gicreg(val, ICH_LR11_EL2);
		break;
	case 12:
		write_gicreg(val, ICH_LR12_EL2);
		break;
	case 13:
		write_gicreg(val, ICH_LR13_EL2);
		break;
	case 14:
		write_gicreg(val, ICH_LR14_EL2);
		break;
	case 15:
		write_gicreg(val, ICH_LR15_EL2);
		break;
	}
}
122 | ||
63000dd8 MZ |
/*
 * Write @val to Group-0 Active Priority Register n (ICH_AP0R<n>_EL2).
 * Only indices 0-3 exist; other values are silently ignored.
 */
static void __hyp_text __vgic_v3_write_ap0rn(u32 val, int n)
{
	switch (n) {
	case 0:
		write_gicreg(val, ICH_AP0R0_EL2);
		break;
	case 1:
		write_gicreg(val, ICH_AP0R1_EL2);
		break;
	case 2:
		write_gicreg(val, ICH_AP0R2_EL2);
		break;
	case 3:
		write_gicreg(val, ICH_AP0R3_EL2);
		break;
	}
}
140 | ||
/*
 * Write @val to Group-1 Active Priority Register n (ICH_AP1R<n>_EL2).
 * Only indices 0-3 exist; other values are silently ignored.
 */
static void __hyp_text __vgic_v3_write_ap1rn(u32 val, int n)
{
	switch (n) {
	case 0:
		write_gicreg(val, ICH_AP1R0_EL2);
		break;
	case 1:
		write_gicreg(val, ICH_AP1R1_EL2);
		break;
	case 2:
		write_gicreg(val, ICH_AP1R2_EL2);
		break;
	case 3:
		write_gicreg(val, ICH_AP1R3_EL2);
		break;
	}
}
158 | ||
/*
 * Read Group-0 Active Priority Register n (ICH_AP0R<n>_EL2).
 * Callers must pass n in [0, 3]; anything else is a programming error.
 */
static u32 __hyp_text __vgic_v3_read_ap0rn(int n)
{
	u32 val;

	switch (n) {
	case 0:
		val = read_gicreg(ICH_AP0R0_EL2);
		break;
	case 1:
		val = read_gicreg(ICH_AP0R1_EL2);
		break;
	case 2:
		val = read_gicreg(ICH_AP0R2_EL2);
		break;
	case 3:
		val = read_gicreg(ICH_AP0R3_EL2);
		break;
	default:
		unreachable();
	}

	return val;
}
182 | ||
/*
 * Read Group-1 Active Priority Register n (ICH_AP1R<n>_EL2).
 * Callers must pass n in [0, 3]; anything else is a programming error.
 */
static u32 __hyp_text __vgic_v3_read_ap1rn(int n)
{
	u32 val;

	switch (n) {
	case 0:
		val = read_gicreg(ICH_AP1R0_EL2);
		break;
	case 1:
		val = read_gicreg(ICH_AP1R1_EL2);
		break;
	case 2:
		val = read_gicreg(ICH_AP1R2_EL2);
		break;
	case 3:
		val = read_gicreg(ICH_AP1R3_EL2);
		break;
	default:
		unreachable();
	}

	return val;
}
206 | ||
f68d2b1b MZ |
/*
 * Save the guest's view of the GICv3 CPU interface into the shadow
 * state on VM exit, and hand the hardware back to the host.
 *
 * Barrier/ordering within this function is deliberate and must not be
 * reshuffled: the dsb/isb sequences pair with the hardware's memory-
 * mapped vs system-register views of the GIC.
 */
void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
	u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
	u64 val;

	/*
	 * Make sure stores to the GIC via the memory mapped interface
	 * are now visible to the system register interface.
	 */
	if (!cpu_if->vgic_sre) {
		dsb(st);
		cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2);
	}

	if (used_lrs) {
		int i;
		u32 nr_pre_bits;

		cpu_if->vgic_elrsr = read_gicreg(ICH_ELSR_EL2);

		/* Disable the vCPU interface before touching the LRs */
		write_gicreg(0, ICH_HCR_EL2);
		val = read_gicreg(ICH_VTR_EL2);
		nr_pre_bits = vtr_to_nr_pre_bits(val);

		for (i = 0; i < used_lrs; i++) {
			/* An empty LR (per ELRSR) has no state worth reading */
			if (cpu_if->vgic_elrsr & (1 << i))
				cpu_if->vgic_lr[i] &= ~ICH_LR_STATE;
			else
				cpu_if->vgic_lr[i] = __gic_v3_get_lr(i);

			__gic_v3_set_lr(0, i);
		}

		/* Only the APRs implied by the preemption bits exist */
		switch (nr_pre_bits) {
		case 7:
			cpu_if->vgic_ap0r[3] = __vgic_v3_read_ap0rn(3);
			cpu_if->vgic_ap0r[2] = __vgic_v3_read_ap0rn(2);
			/* Fall through */
		case 6:
			cpu_if->vgic_ap0r[1] = __vgic_v3_read_ap0rn(1);
			/* Fall through */
		default:
			cpu_if->vgic_ap0r[0] = __vgic_v3_read_ap0rn(0);
		}

		switch (nr_pre_bits) {
		case 7:
			cpu_if->vgic_ap1r[3] = __vgic_v3_read_ap1rn(3);
			cpu_if->vgic_ap1r[2] = __vgic_v3_read_ap1rn(2);
			/* Fall through */
		case 6:
			cpu_if->vgic_ap1r[1] = __vgic_v3_read_ap1rn(1);
			/* Fall through */
		default:
			cpu_if->vgic_ap1r[0] = __vgic_v3_read_ap1rn(0);
		}
	} else {
		/*
		 * ICH_HCR_EL2 was left live for sysreg trapping or GICv4
		 * VLPI delivery (see the restore path) — turn it off now.
		 */
		if (static_branch_unlikely(&vgic_v3_cpuif_trap) ||
		    cpu_if->its_vpe.its_vm)
			write_gicreg(0, ICH_HCR_EL2);

		cpu_if->vgic_elrsr = 0xffff;
		cpu_if->vgic_ap0r[0] = 0;
		cpu_if->vgic_ap0r[1] = 0;
		cpu_if->vgic_ap0r[2] = 0;
		cpu_if->vgic_ap0r[3] = 0;
		cpu_if->vgic_ap1r[0] = 0;
		cpu_if->vgic_ap1r[1] = 0;
		cpu_if->vgic_ap1r[2] = 0;
		cpu_if->vgic_ap1r[3] = 0;
	}

	/* Give the host back its native system-register interface */
	val = read_gicreg(ICC_SRE_EL2);
	write_gicreg(val | ICC_SRE_EL2_ENABLE, ICC_SRE_EL2);

	if (!cpu_if->vgic_sre) {
		/* Make sure ENABLE is set at EL2 before setting SRE at EL1 */
		isb();
		write_gicreg(1, ICC_SRE_EL1);
	}
}
285 | ||
/*
 * Restore the guest's GICv3 CPU interface state from the shadow copy
 * on VM entry. Inverse of __vgic_v3_save_state(); the barrier and
 * register-write ordering is architectural and must be preserved.
 */
void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
	u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
	u64 val;
	u32 nr_pre_bits;
	int i;

	/*
	 * VFIQEn is RES1 if ICC_SRE_EL1.SRE is 1. This causes a
	 * Group0 interrupt (as generated in GICv2 mode) to be
	 * delivered as a FIQ to the guest, with potentially fatal
	 * consequences. So we must make sure that ICC_SRE_EL1 has
	 * been actually programmed with the value we want before
	 * starting to mess with the rest of the GIC, and VMCR_EL2 in
	 * particular.
	 */
	if (!cpu_if->vgic_sre) {
		write_gicreg(0, ICC_SRE_EL1);
		isb();
		write_gicreg(cpu_if->vgic_vmcr, ICH_VMCR_EL2);
	}

	val = read_gicreg(ICH_VTR_EL2);
	nr_pre_bits = vtr_to_nr_pre_bits(val);

	if (used_lrs) {
		write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);

		/* Only the APRs implied by the preemption bits exist */
		switch (nr_pre_bits) {
		case 7:
			__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[3], 3);
			__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[2], 2);
			/* Fall through */
		case 6:
			__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[1], 1);
			/* Fall through */
		default:
			__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[0], 0);
		}

		switch (nr_pre_bits) {
		case 7:
			__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[3], 3);
			__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[2], 2);
			/* Fall through */
		case 6:
			__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[1], 1);
			/* Fall through */
		default:
			__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[0], 0);
		}

		for (i = 0; i < used_lrs; i++)
			__gic_v3_set_lr(cpu_if->vgic_lr[i], i);
	} else {
		/*
		 * If we need to trap system registers, we must write
		 * ICH_HCR_EL2 anyway, even if no interrupts are being
		 * injected. Same thing if GICv4 is used, as VLPI
		 * delivery is gated by ICH_HCR_EL2.En.
		 */
		if (static_branch_unlikely(&vgic_v3_cpuif_trap) ||
		    cpu_if->its_vpe.its_vm)
			write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);
	}

	/*
	 * Ensures that the above will have reached the
	 * (re)distributors. This ensure the guest will read the
	 * correct values from the memory-mapped interface.
	 */
	if (!cpu_if->vgic_sre) {
		isb();
		dsb(sy);
	}

	/*
	 * Prevent the guest from touching the GIC system registers if
	 * SRE isn't enabled for GICv3 emulation.
	 */
	write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE,
		     ICC_SRE_EL2);
}
366 | ||
0d98d00b MZ |
367 | void __hyp_text __vgic_v3_init_lrs(void) |
368 | { | |
369 | int max_lr_idx = vtr_to_max_lr_idx(read_gicreg(ICH_VTR_EL2)); | |
370 | int i; | |
371 | ||
372 | for (i = 0; i <= max_lr_idx; i++) | |
373 | __gic_v3_set_lr(0, i); | |
374 | } | |
375 | ||
cf0ba18a | 376 | u64 __hyp_text __vgic_v3_get_ich_vtr_el2(void) |
f68d2b1b MZ |
377 | { |
378 | return read_gicreg(ICH_VTR_EL2); | |
379 | } | |
328e5664 CD |
380 | |
381 | u64 __hyp_text __vgic_v3_read_vmcr(void) | |
382 | { | |
383 | return read_gicreg(ICH_VMCR_EL2); | |
384 | } | |
385 | ||
/* Install a new value into ICH_VMCR_EL2. */
void __hyp_text __vgic_v3_write_vmcr(u32 vmcr)
{
	write_gicreg(vmcr, ICH_VMCR_EL2);
}
59da1cbf MZ |
390 | |
391 | #ifdef CONFIG_ARM64 | |
392 | ||
d70c7b31 MZ |
393 | static int __hyp_text __vgic_v3_bpr_min(void) |
394 | { | |
395 | /* See Pseudocode for VPriorityGroup */ | |
396 | return 8 - vtr_to_nr_pre_bits(read_gicreg(ICH_VTR_EL2)); | |
397 | } | |
398 | ||
132a324a MZ |
399 | static int __hyp_text __vgic_v3_get_group(struct kvm_vcpu *vcpu) |
400 | { | |
401 | u32 esr = kvm_vcpu_get_hsr(vcpu); | |
402 | u8 crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT; | |
403 | ||
404 | return crm != 8; | |
405 | } | |
406 | ||
#define GICv3_IDLE_PRIORITY	0xff

/*
 * Scan the in-use List Registers for the highest-priority pending
 * interrupt whose group is enabled, mimicking hardware acknowledge
 * prioritization.
 *
 * Returns the winning LR index, or -1 if nothing is deliverable; the
 * LR value (or ICC_IAR1_EL1_SPURIOUS) is returned through @lr_val.
 */
static int __hyp_text __vgic_v3_highest_priority_lr(struct kvm_vcpu *vcpu,
						    u32 vmcr,
						    u64 *lr_val)
{
	unsigned int used_lrs = vcpu->arch.vgic_cpu.used_lrs;
	u8 priority = GICv3_IDLE_PRIORITY;
	int i, lr = -1;

	for (i = 0; i < used_lrs; i++) {
		u64 val = __gic_v3_get_lr(i);
		u8 lr_prio = (val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;

		/* Not pending in the state? */
		if ((val & ICH_LR_STATE) != ICH_LR_PENDING_BIT)
			continue;

		/* Group-0 interrupt, but Group-0 disabled? */
		if (!(val & ICH_LR_GROUP) && !(vmcr & ICH_VMCR_ENG0_MASK))
			continue;

		/* Group-1 interrupt, but Group-1 disabled? */
		if ((val & ICH_LR_GROUP) && !(vmcr & ICH_VMCR_ENG1_MASK))
			continue;

		/* Not the highest priority? */
		if (lr_prio >= priority)
			continue;

		/* This is a candidate */
		priority = lr_prio;
		*lr_val = val;
		lr = i;
	}

	if (lr == -1)
		*lr_val = ICC_IAR1_EL1_SPURIOUS;

	return lr;
}
448 | ||
b6f49035 MZ |
/*
 * Locate the List Register holding @intid in the active state.
 *
 * Returns the LR index and its value via @lr_val, or -1 (with
 * @lr_val set to ICC_IAR1_EL1_SPURIOUS) if no such LR exists.
 */
static int __hyp_text __vgic_v3_find_active_lr(struct kvm_vcpu *vcpu,
					       int intid, u64 *lr_val)
{
	unsigned int used_lrs = vcpu->arch.vgic_cpu.used_lrs;
	int i;

	for (i = 0; i < used_lrs; i++) {
		u64 val = __gic_v3_get_lr(i);

		if ((val & ICH_LR_VIRTUAL_ID_MASK) == intid &&
		    (val & ICH_LR_ACTIVE_BIT)) {
			*lr_val = val;
			return i;
		}
	}

	*lr_val = ICC_IAR1_EL1_SPURIOUS;
	return -1;
}
468 | ||
132a324a MZ |
/*
 * Compute the highest active priority from the APRs (lowest set bit
 * across ICH_AP0Rn/AP1Rn), rescaled to a full 8-bit priority.
 * Returns GICv3_IDLE_PRIORITY when no priority is active.
 */
static int __hyp_text __vgic_v3_get_highest_active_priority(void)
{
	u8 nr_apr_regs = vtr_to_nr_apr_regs(read_gicreg(ICH_VTR_EL2));
	u32 hap = 0;
	int i;

	for (i = 0; i < nr_apr_regs; i++) {
		u32 val;

		/*
		 * The ICH_AP0Rn_EL2 and ICH_AP1Rn_EL2 registers
		 * contain the active priority levels for this VCPU
		 * for the maximum number of supported priority
		 * levels, and we return the full priority level only
		 * if the BPR is programmed to its minimum, otherwise
		 * we return a combination of the priority level and
		 * subpriority, as determined by the setting of the
		 * BPR, but without the full subpriority.
		 */
		val = __vgic_v3_read_ap0rn(i);
		val |= __vgic_v3_read_ap1rn(i);
		if (!val) {
			/* 32 priority slots per APR pair */
			hap += 32;
			continue;
		}

		return (hap + __ffs(val)) << __vgic_v3_bpr_min();
	}

	return GICv3_IDLE_PRIORITY;
}
500 | ||
d70c7b31 MZ |
501 | static unsigned int __hyp_text __vgic_v3_get_bpr0(u32 vmcr) |
502 | { | |
503 | return (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT; | |
504 | } | |
505 | ||
506 | static unsigned int __hyp_text __vgic_v3_get_bpr1(u32 vmcr) | |
507 | { | |
508 | unsigned int bpr; | |
509 | ||
510 | if (vmcr & ICH_VMCR_CBPR_MASK) { | |
511 | bpr = __vgic_v3_get_bpr0(vmcr); | |
512 | if (bpr < 7) | |
513 | bpr++; | |
514 | } else { | |
515 | bpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT; | |
516 | } | |
517 | ||
518 | return bpr; | |
519 | } | |
520 | ||
132a324a MZ |
521 | /* |
522 | * Convert a priority to a preemption level, taking the relevant BPR | |
523 | * into account by zeroing the sub-priority bits. | |
524 | */ | |
525 | static u8 __hyp_text __vgic_v3_pri_to_pre(u8 pri, u32 vmcr, int grp) | |
526 | { | |
527 | unsigned int bpr; | |
528 | ||
529 | if (!grp) | |
530 | bpr = __vgic_v3_get_bpr0(vmcr) + 1; | |
531 | else | |
532 | bpr = __vgic_v3_get_bpr1(vmcr); | |
533 | ||
534 | return pri & (GENMASK(7, 0) << bpr); | |
535 | } | |
536 | ||
/*
 * The priority value is independent of any of the BPR values, so we
 * normalize it using the minumal BPR value. This guarantees that no
 * matter what the guest does with its BPR, we can always set/get the
 * same value of a priority.
 */
static void __hyp_text __vgic_v3_set_active_priority(u8 pri, u32 vmcr, int grp)
{
	u8 pre, ap;
	u32 val;
	int apr;

	/* Preemption level, then its bit position across the APR array */
	pre = __vgic_v3_pri_to_pre(pri, vmcr, grp);
	ap = pre >> __vgic_v3_bpr_min();
	apr = ap / 32;

	/* Set the matching bit in the group's APR register */
	if (!grp) {
		val = __vgic_v3_read_ap0rn(apr);
		__vgic_v3_write_ap0rn(val | BIT(ap % 32), apr);
	} else {
		val = __vgic_v3_read_ap1rn(apr);
		__vgic_v3_write_ap1rn(val | BIT(ap % 32), apr);
	}
}
561 | ||
b6f49035 MZ |
/*
 * Drop the highest active priority: clear the lowest set bit across
 * the AP0R/AP1R registers and return the corresponding priority,
 * rescaled to 8 bits. Returns GICv3_IDLE_PRIORITY if nothing was
 * active.
 */
static int __hyp_text __vgic_v3_clear_highest_active_priority(void)
{
	u8 nr_apr_regs = vtr_to_nr_apr_regs(read_gicreg(ICH_VTR_EL2));
	u32 hap = 0;
	int i;

	for (i = 0; i < nr_apr_regs; i++) {
		u32 ap0, ap1;
		int c0, c1;

		ap0 = __vgic_v3_read_ap0rn(i);
		ap1 = __vgic_v3_read_ap1rn(i);
		if (!ap0 && !ap1) {
			/* 32 priority slots per APR pair */
			hap += 32;
			continue;
		}

		c0 = ap0 ? __ffs(ap0) : 32;
		c1 = ap1 ? __ffs(ap1) : 32;

		/* Always clear the LSB, which is the highest priority */
		if (c0 < c1) {
			ap0 &= ~BIT(c0);
			__vgic_v3_write_ap0rn(ap0, i);
			hap += c0;
		} else {
			ap1 &= ~BIT(c1);
			__vgic_v3_write_ap1rn(ap1, i);
			hap += c1;
		}

		/* Rescale to 8 bits of priority */
		return hap << __vgic_v3_bpr_min();
	}

	return GICv3_IDLE_PRIORITY;
}
599 | ||
132a324a MZ |
/*
 * Emulate a guest read of ICC_IAR{0,1}_EL1: acknowledge the highest
 * priority pending interrupt of the right group (if any passes the
 * PMR and active-priority checks), move its LR to active, record the
 * priority as active, and return its INTID in GPR @rt. Returns
 * ICC_IAR1_EL1_SPURIOUS in @rt when nothing can be acknowledged.
 */
static void __hyp_text __vgic_v3_read_iar(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 lr_val;
	u8 lr_prio, pmr;
	int lr, grp;

	grp = __vgic_v3_get_group(vcpu);

	lr = __vgic_v3_highest_priority_lr(vcpu, vmcr, &lr_val);
	if (lr < 0)
		goto spurious;

	/* Accessed via the wrong group's IAR? */
	if (grp != !!(lr_val & ICH_LR_GROUP))
		goto spurious;

	/* Masked by the priority mask register? */
	pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT;
	lr_prio = (lr_val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;
	if (pmr <= lr_prio)
		goto spurious;

	/* Not preempting the currently active priority? */
	if (__vgic_v3_get_highest_active_priority() <= __vgic_v3_pri_to_pre(lr_prio, vmcr, grp))
		goto spurious;

	lr_val &= ~ICH_LR_STATE;
	/* No active state for LPIs */
	if ((lr_val & ICH_LR_VIRTUAL_ID_MASK) <= VGIC_MAX_SPI)
		lr_val |= ICH_LR_ACTIVE_BIT;
	__gic_v3_set_lr(lr_val, lr);
	__vgic_v3_set_active_priority(lr_prio, vmcr, grp);
	vcpu_set_reg(vcpu, rt, lr_val & ICH_LR_VIRTUAL_ID_MASK);
	return;

spurious:
	vcpu_set_reg(vcpu, rt, ICC_IAR1_EL1_SPURIOUS);
}
635 | ||
b6f49035 MZ |
/*
 * Deactivate the interrupt in LR @lr: drop the active bit, and for a
 * hardware-backed LR (ICH_LR_HW) also deactivate the associated
 * physical interrupt via gic_write_dir().
 */
static void __hyp_text __vgic_v3_clear_active_lr(int lr, u64 lr_val)
{
	lr_val &= ~ICH_LR_ACTIVE_BIT;
	if (lr_val & ICH_LR_HW) {
		u32 pid;

		pid = (lr_val & ICH_LR_PHYS_ID_MASK) >> ICH_LR_PHYS_ID_SHIFT;
		gic_write_dir(pid);
	}

	__gic_v3_set_lr(lr_val, lr);
}
648 | ||
/*
 * Account an EOI that matched no List Register by incrementing the
 * EOIcount field of ICH_HCR_EL2.
 */
static void __hyp_text __vgic_v3_bump_eoicount(void)
{
	u32 hcr;

	hcr = read_gicreg(ICH_HCR_EL2);
	hcr += 1 << ICH_HCR_EOIcount_SHIFT;
	write_gicreg(hcr, ICH_HCR_EL2);
}
657 | ||
40228ba5 MZ |
/*
 * Emulate a guest write to ICC_DIR_EL1: deactivate the interrupt
 * whose INTID is in GPR @rt. Only meaningful when EOImode == 1.
 */
static void __hyp_text __vgic_v3_write_dir(struct kvm_vcpu *vcpu,
					   u32 vmcr, int rt)
{
	u32 vid = vcpu_get_reg(vcpu, rt);
	u64 lr_val;
	int lr;

	/* EOImode == 0, nothing to be done here */
	if (!(vmcr & ICH_VMCR_EOIM_MASK))
		return;

	/* No deactivate to be performed on an LPI */
	if (vid >= VGIC_MIN_LPI)
		return;

	lr = __vgic_v3_find_active_lr(vcpu, vid, &lr_val);
	if (lr == -1) {
		/* No matching active LR: record the stray deactivate */
		__vgic_v3_bump_eoicount();
		return;
	}

	__vgic_v3_clear_active_lr(lr, lr_val);
}
681 | ||
b6f49035 MZ |
/*
 * Emulate a guest write to ICC_EOIR{0,1}_EL1: unconditionally drop the
 * highest active priority, then — when EOImode == 0 — also deactivate
 * the interrupt whose INTID is in GPR @rt, provided its group and
 * priority are consistent with the dropped priority.
 */
static void __hyp_text __vgic_v3_write_eoir(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u32 vid = vcpu_get_reg(vcpu, rt);
	u64 lr_val;
	u8 lr_prio, act_prio;
	int lr, grp;

	grp = __vgic_v3_get_group(vcpu);

	/* Drop priority in any case */
	act_prio = __vgic_v3_clear_highest_active_priority();

	/* If EOIing an LPI, no deactivate to be performed */
	if (vid >= VGIC_MIN_LPI)
		return;

	/* EOImode == 1, nothing to be done here */
	if (vmcr & ICH_VMCR_EOIM_MASK)
		return;

	lr = __vgic_v3_find_active_lr(vcpu, vid, &lr_val);
	if (lr == -1) {
		/* No matching active LR: record the stray EOI */
		__vgic_v3_bump_eoicount();
		return;
	}

	lr_prio = (lr_val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;

	/* If priorities or group do not match, the guest has fscked-up. */
	if (grp != !!(lr_val & ICH_LR_GROUP) ||
	    __vgic_v3_pri_to_pre(lr_prio, vmcr, grp) != act_prio)
		return;

	/* Let's now perform the deactivation */
	__vgic_v3_clear_active_lr(lr, lr_val);
}
718 | ||
fbc48a00 MZ |
719 | static void __hyp_text __vgic_v3_read_igrpen0(struct kvm_vcpu *vcpu, u32 vmcr, int rt) |
720 | { | |
721 | vcpu_set_reg(vcpu, rt, !!(vmcr & ICH_VMCR_ENG0_MASK)); | |
722 | } | |
723 | ||
f8b630bc MZ |
724 | static void __hyp_text __vgic_v3_read_igrpen1(struct kvm_vcpu *vcpu, u32 vmcr, int rt) |
725 | { | |
726 | vcpu_set_reg(vcpu, rt, !!(vmcr & ICH_VMCR_ENG1_MASK)); | |
727 | } | |
728 | ||
fbc48a00 MZ |
729 | static void __hyp_text __vgic_v3_write_igrpen0(struct kvm_vcpu *vcpu, u32 vmcr, int rt) |
730 | { | |
731 | u64 val = vcpu_get_reg(vcpu, rt); | |
732 | ||
733 | if (val & 1) | |
734 | vmcr |= ICH_VMCR_ENG0_MASK; | |
735 | else | |
736 | vmcr &= ~ICH_VMCR_ENG0_MASK; | |
737 | ||
738 | __vgic_v3_write_vmcr(vmcr); | |
739 | } | |
740 | ||
f8b630bc MZ |
741 | static void __hyp_text __vgic_v3_write_igrpen1(struct kvm_vcpu *vcpu, u32 vmcr, int rt) |
742 | { | |
743 | u64 val = vcpu_get_reg(vcpu, rt); | |
744 | ||
745 | if (val & 1) | |
746 | vmcr |= ICH_VMCR_ENG1_MASK; | |
747 | else | |
748 | vmcr &= ~ICH_VMCR_ENG1_MASK; | |
749 | ||
750 | __vgic_v3_write_vmcr(vmcr); | |
751 | } | |
752 | ||
423de85a MZ |
753 | static void __hyp_text __vgic_v3_read_bpr0(struct kvm_vcpu *vcpu, u32 vmcr, int rt) |
754 | { | |
755 | vcpu_set_reg(vcpu, rt, __vgic_v3_get_bpr0(vmcr)); | |
756 | } | |
757 | ||
d70c7b31 MZ |
758 | static void __hyp_text __vgic_v3_read_bpr1(struct kvm_vcpu *vcpu, u32 vmcr, int rt) |
759 | { | |
760 | vcpu_set_reg(vcpu, rt, __vgic_v3_get_bpr1(vmcr)); | |
761 | } | |
762 | ||
423de85a MZ |
763 | static void __hyp_text __vgic_v3_write_bpr0(struct kvm_vcpu *vcpu, u32 vmcr, int rt) |
764 | { | |
765 | u64 val = vcpu_get_reg(vcpu, rt); | |
766 | u8 bpr_min = __vgic_v3_bpr_min() - 1; | |
767 | ||
768 | /* Enforce BPR limiting */ | |
769 | if (val < bpr_min) | |
770 | val = bpr_min; | |
771 | ||
772 | val <<= ICH_VMCR_BPR0_SHIFT; | |
773 | val &= ICH_VMCR_BPR0_MASK; | |
774 | vmcr &= ~ICH_VMCR_BPR0_MASK; | |
775 | vmcr |= val; | |
776 | ||
777 | __vgic_v3_write_vmcr(vmcr); | |
778 | } | |
779 | ||
d70c7b31 MZ |
780 | static void __hyp_text __vgic_v3_write_bpr1(struct kvm_vcpu *vcpu, u32 vmcr, int rt) |
781 | { | |
782 | u64 val = vcpu_get_reg(vcpu, rt); | |
783 | u8 bpr_min = __vgic_v3_bpr_min(); | |
784 | ||
785 | if (vmcr & ICH_VMCR_CBPR_MASK) | |
786 | return; | |
787 | ||
788 | /* Enforce BPR limiting */ | |
789 | if (val < bpr_min) | |
790 | val = bpr_min; | |
791 | ||
792 | val <<= ICH_VMCR_BPR1_SHIFT; | |
793 | val &= ICH_VMCR_BPR1_MASK; | |
794 | vmcr &= ~ICH_VMCR_BPR1_MASK; | |
795 | vmcr |= val; | |
796 | ||
797 | __vgic_v3_write_vmcr(vmcr); | |
798 | } | |
799 | ||
f9e7449c MZ |
800 | static void __hyp_text __vgic_v3_read_apxrn(struct kvm_vcpu *vcpu, int rt, int n) |
801 | { | |
802 | u32 val; | |
803 | ||
804 | if (!__vgic_v3_get_group(vcpu)) | |
805 | val = __vgic_v3_read_ap0rn(n); | |
806 | else | |
807 | val = __vgic_v3_read_ap1rn(n); | |
808 | ||
809 | vcpu_set_reg(vcpu, rt, val); | |
810 | } | |
811 | ||
812 | static void __hyp_text __vgic_v3_write_apxrn(struct kvm_vcpu *vcpu, int rt, int n) | |
813 | { | |
814 | u32 val = vcpu_get_reg(vcpu, rt); | |
815 | ||
816 | if (!__vgic_v3_get_group(vcpu)) | |
817 | __vgic_v3_write_ap0rn(val, n); | |
818 | else | |
819 | __vgic_v3_write_ap1rn(val, n); | |
820 | } | |
821 | ||
/*
 * Thin trampolines binding a fixed APR index to the common trap
 * handler signature (vcpu, vmcr, rt), one pair per implemented
 * ICC_AP<0,1>R<n>_EL1 register.
 */
static void __hyp_text __vgic_v3_read_apxr0(struct kvm_vcpu *vcpu,
					    u32 vmcr, int rt)
{
	__vgic_v3_read_apxrn(vcpu, rt, 0);
}

static void __hyp_text __vgic_v3_read_apxr1(struct kvm_vcpu *vcpu,
					    u32 vmcr, int rt)
{
	__vgic_v3_read_apxrn(vcpu, rt, 1);
}

static void __hyp_text __vgic_v3_read_apxr2(struct kvm_vcpu *vcpu,
					    u32 vmcr, int rt)
{
	__vgic_v3_read_apxrn(vcpu, rt, 2);
}

static void __hyp_text __vgic_v3_read_apxr3(struct kvm_vcpu *vcpu,
					    u32 vmcr, int rt)
{
	__vgic_v3_read_apxrn(vcpu, rt, 3);
}

static void __hyp_text __vgic_v3_write_apxr0(struct kvm_vcpu *vcpu,
					     u32 vmcr, int rt)
{
	__vgic_v3_write_apxrn(vcpu, rt, 0);
}

static void __hyp_text __vgic_v3_write_apxr1(struct kvm_vcpu *vcpu,
					     u32 vmcr, int rt)
{
	__vgic_v3_write_apxrn(vcpu, rt, 1);
}

static void __hyp_text __vgic_v3_write_apxr2(struct kvm_vcpu *vcpu,
					     u32 vmcr, int rt)
{
	__vgic_v3_write_apxrn(vcpu, rt, 2);
}

static void __hyp_text __vgic_v3_write_apxr3(struct kvm_vcpu *vcpu,
					     u32 vmcr, int rt)
{
	__vgic_v3_write_apxrn(vcpu, rt, 3);
}
869 | ||
2724c11a MZ |
/*
 * Emulate a guest read of ICC_HPPIR{0,1}_EL1: return the INTID of the
 * highest priority pending interrupt of the matching group, or the
 * spurious INTID if none (lr_val is set to ICC_IAR1_EL1_SPURIOUS by
 * the helper on the lr == -1 path, so the fall-through is safe).
 */
static void __hyp_text __vgic_v3_read_hppir(struct kvm_vcpu *vcpu,
					    u32 vmcr, int rt)
{
	u64 lr_val;
	int lr, lr_grp, grp;

	grp = __vgic_v3_get_group(vcpu);

	lr = __vgic_v3_highest_priority_lr(vcpu, vmcr, &lr_val);
	if (lr == -1)
		goto spurious;

	lr_grp = !!(lr_val & ICH_LR_GROUP);
	if (lr_grp != grp)
		lr_val = ICC_IAR1_EL1_SPURIOUS;

spurious:
	vcpu_set_reg(vcpu, rt, lr_val & ICH_LR_VIRTUAL_ID_MASK);
}
889 | ||
6293d651 MZ |
890 | static void __hyp_text __vgic_v3_read_pmr(struct kvm_vcpu *vcpu, |
891 | u32 vmcr, int rt) | |
892 | { | |
893 | vmcr &= ICH_VMCR_PMR_MASK; | |
894 | vmcr >>= ICH_VMCR_PMR_SHIFT; | |
895 | vcpu_set_reg(vcpu, rt, vmcr); | |
896 | } | |
897 | ||
898 | static void __hyp_text __vgic_v3_write_pmr(struct kvm_vcpu *vcpu, | |
899 | u32 vmcr, int rt) | |
900 | { | |
901 | u32 val = vcpu_get_reg(vcpu, rt); | |
902 | ||
903 | val <<= ICH_VMCR_PMR_SHIFT; | |
904 | val &= ICH_VMCR_PMR_MASK; | |
905 | vmcr &= ~ICH_VMCR_PMR_MASK; | |
906 | vmcr |= val; | |
907 | ||
908 | write_gicreg(vmcr, ICH_VMCR_EL2); | |
909 | } | |
910 | ||
43515894 MZ |
911 | static void __hyp_text __vgic_v3_read_rpr(struct kvm_vcpu *vcpu, |
912 | u32 vmcr, int rt) | |
913 | { | |
914 | u32 val = __vgic_v3_get_highest_active_priority(); | |
915 | vcpu_set_reg(vcpu, rt, val); | |
916 | } | |
917 | ||
d840b2d3 MZ |
/*
 * Emulate a guest read of ICC_CTLR_EL1: synthesize the value from the
 * read-only fields of ICH_VTR_EL2 and the guest-controlled EOImode
 * and CBPR bits held in VMCR.
 */
static void __hyp_text __vgic_v3_read_ctlr(struct kvm_vcpu *vcpu,
					   u32 vmcr, int rt)
{
	u32 vtr, val;

	vtr = read_gicreg(ICH_VTR_EL2);
	/* PRIbits */
	val = ((vtr >> 29) & 7) << ICC_CTLR_EL1_PRI_BITS_SHIFT;
	/* IDbits */
	val |= ((vtr >> 23) & 7) << ICC_CTLR_EL1_ID_BITS_SHIFT;
	/* SEIS */
	val |= ((vtr >> 22) & 1) << ICC_CTLR_EL1_SEIS_SHIFT;
	/* A3V */
	val |= ((vtr >> 21) & 1) << ICC_CTLR_EL1_A3V_SHIFT;
	/* EOImode */
	val |= ((vmcr & ICH_VMCR_EOIM_MASK) >> ICH_VMCR_EOIM_SHIFT) << ICC_CTLR_EL1_EOImode_SHIFT;
	/* CBPR */
	val |= (vmcr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT;

	vcpu_set_reg(vcpu, rt, val);
}
939 | ||
/*
 * Emulate a guest write to ICC_CTLR_EL1: only CBPR and EOImode are
 * writable, and both live in VMCR, which is written straight to the
 * hardware.
 */
static void __hyp_text __vgic_v3_write_ctlr(struct kvm_vcpu *vcpu,
					    u32 vmcr, int rt)
{
	u32 val = vcpu_get_reg(vcpu, rt);

	if (val & ICC_CTLR_EL1_CBPR_MASK)
		vmcr |= ICH_VMCR_CBPR_MASK;
	else
		vmcr &= ~ICH_VMCR_CBPR_MASK;

	if (val & ICC_CTLR_EL1_EOImode_MASK)
		vmcr |= ICH_VMCR_EOIM_MASK;
	else
		vmcr &= ~ICH_VMCR_EOIM_MASK;

	write_gicreg(vmcr, ICH_VMCR_EL2);
}
957 | ||
59da1cbf MZ |
/*
 * Handle a trapped GICv3 CPU interface access (ICC_*_EL1 from AArch64,
 * or the equivalent cp15 access from AArch32), dispatching to the
 * matching emulation handler.
 *
 * Returns 1 if the access was emulated (or skipped due to a failed
 * AArch32 condition check), 0 if it must be forwarded to the host for
 * a proper exception injection (unknown register, or a read/write to
 * a write/read-only register).
 */
int __hyp_text __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu)
{
	int rt;
	u32 esr;
	u32 vmcr;
	void (*fn)(struct kvm_vcpu *, u32, int);
	bool is_read;
	u32 sysreg;

	esr = kvm_vcpu_get_hsr(vcpu);
	if (vcpu_mode_is_32bit(vcpu)) {
		/* Condition code failed: the access is a (handled) no-op */
		if (!kvm_condition_valid(vcpu))
			return 1;

		sysreg = esr_cp15_to_sysreg(esr);
	} else {
		sysreg = esr_sys64_to_sysreg(esr);
	}

	is_read = (esr & ESR_ELx_SYS64_ISS_DIR_MASK) == ESR_ELx_SYS64_ISS_DIR_READ;

	switch (sysreg) {
	case SYS_ICC_IAR0_EL1:
	case SYS_ICC_IAR1_EL1:
		/* IAR is read-only */
		if (unlikely(!is_read))
			return 0;
		fn = __vgic_v3_read_iar;
		break;
	case SYS_ICC_EOIR0_EL1:
	case SYS_ICC_EOIR1_EL1:
		/* EOIR is write-only */
		if (unlikely(is_read))
			return 0;
		fn = __vgic_v3_write_eoir;
		break;
	case SYS_ICC_IGRPEN1_EL1:
		if (is_read)
			fn = __vgic_v3_read_igrpen1;
		else
			fn = __vgic_v3_write_igrpen1;
		break;
	case SYS_ICC_BPR1_EL1:
		if (is_read)
			fn = __vgic_v3_read_bpr1;
		else
			fn = __vgic_v3_write_bpr1;
		break;
	case SYS_ICC_AP0Rn_EL1(0):
	case SYS_ICC_AP1Rn_EL1(0):
		if (is_read)
			fn = __vgic_v3_read_apxr0;
		else
			fn = __vgic_v3_write_apxr0;
		break;
	case SYS_ICC_AP0Rn_EL1(1):
	case SYS_ICC_AP1Rn_EL1(1):
		if (is_read)
			fn = __vgic_v3_read_apxr1;
		else
			fn = __vgic_v3_write_apxr1;
		break;
	case SYS_ICC_AP0Rn_EL1(2):
	case SYS_ICC_AP1Rn_EL1(2):
		if (is_read)
			fn = __vgic_v3_read_apxr2;
		else
			fn = __vgic_v3_write_apxr2;
		break;
	case SYS_ICC_AP0Rn_EL1(3):
	case SYS_ICC_AP1Rn_EL1(3):
		if (is_read)
			fn = __vgic_v3_read_apxr3;
		else
			fn = __vgic_v3_write_apxr3;
		break;
	case SYS_ICC_HPPIR0_EL1:
	case SYS_ICC_HPPIR1_EL1:
		/* HPPIR is read-only */
		if (unlikely(!is_read))
			return 0;
		fn = __vgic_v3_read_hppir;
		break;
	case SYS_ICC_IGRPEN0_EL1:
		if (is_read)
			fn = __vgic_v3_read_igrpen0;
		else
			fn = __vgic_v3_write_igrpen0;
		break;
	case SYS_ICC_BPR0_EL1:
		if (is_read)
			fn = __vgic_v3_read_bpr0;
		else
			fn = __vgic_v3_write_bpr0;
		break;
	case SYS_ICC_DIR_EL1:
		/* DIR is write-only */
		if (unlikely(is_read))
			return 0;
		fn = __vgic_v3_write_dir;
		break;
	case SYS_ICC_RPR_EL1:
		/* RPR is read-only */
		if (unlikely(!is_read))
			return 0;
		fn = __vgic_v3_read_rpr;
		break;
	case SYS_ICC_CTLR_EL1:
		if (is_read)
			fn = __vgic_v3_read_ctlr;
		else
			fn = __vgic_v3_write_ctlr;
		break;
	case SYS_ICC_PMR_EL1:
		if (is_read)
			fn = __vgic_v3_read_pmr;
		else
			fn = __vgic_v3_write_pmr;
		break;
	default:
		return 0;
	}

	vmcr = __vgic_v3_read_vmcr();
	rt = kvm_vcpu_sys_get_rt(vcpu);
	fn(vcpu, vmcr, rt);

	return 1;
}
1082 | ||
1083 | #endif |