/*
 * Copyright (C) 2012-2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/compiler.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>

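/*
 * ICH_VTR_EL2 fields used below: ListRegs (bits [4:0]) holds the
 * number of implemented List Registers minus one, and PREbits
 * (bits [28:26]) the number of preemption priority bits minus one.
 * Each 32-bit ICH_APmRn_EL2 register tracks 32 priority levels, so
 * 1 << (pre_bits - 5) registers are needed: 5 preemption bits -> 1
 * register, 6 bits -> 2, 7 bits -> 4.
 */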
#define vtr_to_max_lr_idx(v)	((v) & 0xf)
#define vtr_to_nr_pre_bits(v)	((((u32)(v) >> 26) & 7) + 1)
#define vtr_to_nr_apr_regs(v)	(1 << (vtr_to_nr_pre_bits(v) - 5))

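/*
 * The ICH_LR<n>_EL2 registers are not dynamically indexable, so the
 * two accessors below have to dispatch through a switch on the LR
 * number.
 */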
static u64 __hyp_text __gic_v3_get_lr(unsigned int lr)
{
	switch (lr & 0xf) {
	case 0:
		return read_gicreg(ICH_LR0_EL2);
	case 1:
		return read_gicreg(ICH_LR1_EL2);
	case 2:
		return read_gicreg(ICH_LR2_EL2);
	case 3:
		return read_gicreg(ICH_LR3_EL2);
	case 4:
		return read_gicreg(ICH_LR4_EL2);
	case 5:
		return read_gicreg(ICH_LR5_EL2);
	case 6:
		return read_gicreg(ICH_LR6_EL2);
	case 7:
		return read_gicreg(ICH_LR7_EL2);
	case 8:
		return read_gicreg(ICH_LR8_EL2);
	case 9:
		return read_gicreg(ICH_LR9_EL2);
	case 10:
		return read_gicreg(ICH_LR10_EL2);
	case 11:
		return read_gicreg(ICH_LR11_EL2);
	case 12:
		return read_gicreg(ICH_LR12_EL2);
	case 13:
		return read_gicreg(ICH_LR13_EL2);
	case 14:
		return read_gicreg(ICH_LR14_EL2);
	case 15:
		return read_gicreg(ICH_LR15_EL2);
	}

	unreachable();
}

static void __hyp_text __gic_v3_set_lr(u64 val, int lr)
{
	switch (lr & 0xf) {
	case 0:
		write_gicreg(val, ICH_LR0_EL2);
		break;
	case 1:
		write_gicreg(val, ICH_LR1_EL2);
		break;
	case 2:
		write_gicreg(val, ICH_LR2_EL2);
		break;
	case 3:
		write_gicreg(val, ICH_LR3_EL2);
		break;
	case 4:
		write_gicreg(val, ICH_LR4_EL2);
		break;
	case 5:
		write_gicreg(val, ICH_LR5_EL2);
		break;
	case 6:
		write_gicreg(val, ICH_LR6_EL2);
		break;
	case 7:
		write_gicreg(val, ICH_LR7_EL2);
		break;
	case 8:
		write_gicreg(val, ICH_LR8_EL2);
		break;
	case 9:
		write_gicreg(val, ICH_LR9_EL2);
		break;
	case 10:
		write_gicreg(val, ICH_LR10_EL2);
		break;
	case 11:
		write_gicreg(val, ICH_LR11_EL2);
		break;
	case 12:
		write_gicreg(val, ICH_LR12_EL2);
		break;
	case 13:
		write_gicreg(val, ICH_LR13_EL2);
		break;
	case 14:
		write_gicreg(val, ICH_LR14_EL2);
		break;
	case 15:
		write_gicreg(val, ICH_LR15_EL2);
		break;
	}
}

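/*
 * Only read ICH_MISR_EL2/ICH_EISR_EL2 when a maintenance interrupt can
 * actually be pending: either underflow notification is enabled
 * (ICH_HCR_EL2.UIE) or a live LR carries a purely virtual interrupt
 * that requests an EOI maintenance interrupt. This spares two system
 * register reads on the common path.
 */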
static void __hyp_text save_maint_int_state(struct kvm_vcpu *vcpu, int nr_lr)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
	int i;
	bool expect_mi;

	expect_mi = !!(cpu_if->vgic_hcr & ICH_HCR_UIE);

	for (i = 0; i < nr_lr; i++) {
		if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i)))
			continue;

		expect_mi |= (!(cpu_if->vgic_lr[i] & ICH_LR_HW) &&
			      (cpu_if->vgic_lr[i] & ICH_LR_EOI));
	}

	if (expect_mi) {
		cpu_if->vgic_misr = read_gicreg(ICH_MISR_EL2);

		if (cpu_if->vgic_misr & ICH_MISR_EOI)
			cpu_if->vgic_eisr = read_gicreg(ICH_EISR_EL2);
		else
			cpu_if->vgic_eisr = 0;
	} else {
		cpu_if->vgic_misr = 0;
		cpu_if->vgic_eisr = 0;
	}
}

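/*
 * Accessors for the ICH_AP0R<n>_EL2 and ICH_AP1R<n>_EL2 active
 * priority registers, which, like the LRs, cannot be indexed
 * dynamically.
 */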
static void __hyp_text __vgic_v3_write_ap0rn(u32 val, int n)
{
	switch (n) {
	case 0:
		write_gicreg(val, ICH_AP0R0_EL2);
		break;
	case 1:
		write_gicreg(val, ICH_AP0R1_EL2);
		break;
	case 2:
		write_gicreg(val, ICH_AP0R2_EL2);
		break;
	case 3:
		write_gicreg(val, ICH_AP0R3_EL2);
		break;
	}
}

static void __hyp_text __vgic_v3_write_ap1rn(u32 val, int n)
{
	switch (n) {
	case 0:
		write_gicreg(val, ICH_AP1R0_EL2);
		break;
	case 1:
		write_gicreg(val, ICH_AP1R1_EL2);
		break;
	case 2:
		write_gicreg(val, ICH_AP1R2_EL2);
		break;
	case 3:
		write_gicreg(val, ICH_AP1R3_EL2);
		break;
	}
}

static u32 __hyp_text __vgic_v3_read_ap0rn(int n)
{
	u32 val;

	switch (n) {
	case 0:
		val = read_gicreg(ICH_AP0R0_EL2);
		break;
	case 1:
		val = read_gicreg(ICH_AP0R1_EL2);
		break;
	case 2:
		val = read_gicreg(ICH_AP0R2_EL2);
		break;
	case 3:
		val = read_gicreg(ICH_AP0R3_EL2);
		break;
	default:
		unreachable();
	}

	return val;
}

static u32 __hyp_text __vgic_v3_read_ap1rn(int n)
{
	u32 val;

	switch (n) {
	case 0:
		val = read_gicreg(ICH_AP1R0_EL2);
		break;
	case 1:
		val = read_gicreg(ICH_AP1R1_EL2);
		break;
	case 2:
		val = read_gicreg(ICH_AP1R2_EL2);
		break;
	case 3:
		val = read_gicreg(ICH_AP1R3_EL2);
		break;
	default:
		unreachable();
	}

	return val;
}

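/*
 * World-switch save path: stash the CPU interface state in the shadow
 * struct on guest exit, emptying the LRs so the host's view of the
 * GIC is clean. Runs in the __hyp_text section, at EL2.
 */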
void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
	u64 val;

	/*
	 * Make sure stores to the GIC via the memory mapped interface
	 * are now visible to the system register interface.
	 */
	if (!cpu_if->vgic_sre)
		dsb(st);

	cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2);

	if (vcpu->arch.vgic_cpu.live_lrs) {
		int i;
		u32 max_lr_idx, nr_pre_bits;

		cpu_if->vgic_elrsr = read_gicreg(ICH_ELSR_EL2);

		write_gicreg(0, ICH_HCR_EL2);
		val = read_gicreg(ICH_VTR_EL2);
		max_lr_idx = vtr_to_max_lr_idx(val);
		nr_pre_bits = vtr_to_nr_pre_bits(val);

		save_maint_int_state(vcpu, max_lr_idx + 1);

		for (i = 0; i <= max_lr_idx; i++) {
			if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i)))
				continue;

			if (cpu_if->vgic_elrsr & (1 << i))
				cpu_if->vgic_lr[i] &= ~ICH_LR_STATE;
			else
				cpu_if->vgic_lr[i] = __gic_v3_get_lr(i);

			__gic_v3_set_lr(0, i);
		}

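		/*
		 * Deliberate fall-through in the two switches below:
		 * with 7 preemption bits, AP0R3..AP0R0 are implemented
		 * and must all be saved; with 6 bits, AP0R1..AP0R0;
		 * otherwise only AP0R0 (and likewise for the AP1Rn
		 * registers).
		 */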
		switch (nr_pre_bits) {
		case 7:
			cpu_if->vgic_ap0r[3] = __vgic_v3_read_ap0rn(3);
			cpu_if->vgic_ap0r[2] = __vgic_v3_read_ap0rn(2);
		case 6:
			cpu_if->vgic_ap0r[1] = __vgic_v3_read_ap0rn(1);
		default:
			cpu_if->vgic_ap0r[0] = __vgic_v3_read_ap0rn(0);
		}

		switch (nr_pre_bits) {
		case 7:
			cpu_if->vgic_ap1r[3] = __vgic_v3_read_ap1rn(3);
			cpu_if->vgic_ap1r[2] = __vgic_v3_read_ap1rn(2);
		case 6:
			cpu_if->vgic_ap1r[1] = __vgic_v3_read_ap1rn(1);
		default:
			cpu_if->vgic_ap1r[0] = __vgic_v3_read_ap1rn(0);
		}

		vcpu->arch.vgic_cpu.live_lrs = 0;
	} else {
		if (static_branch_unlikely(&vgic_v3_cpuif_trap))
			write_gicreg(0, ICH_HCR_EL2);

		cpu_if->vgic_misr = 0;
		cpu_if->vgic_eisr = 0;
		cpu_if->vgic_elrsr = 0xffff;
		cpu_if->vgic_ap0r[0] = 0;
		cpu_if->vgic_ap0r[1] = 0;
		cpu_if->vgic_ap0r[2] = 0;
		cpu_if->vgic_ap0r[3] = 0;
		cpu_if->vgic_ap1r[0] = 0;
		cpu_if->vgic_ap1r[1] = 0;
		cpu_if->vgic_ap1r[2] = 0;
		cpu_if->vgic_ap1r[3] = 0;
	}

	val = read_gicreg(ICC_SRE_EL2);
	write_gicreg(val | ICC_SRE_EL2_ENABLE, ICC_SRE_EL2);

	if (!cpu_if->vgic_sre) {
		/* Make sure ENABLE is set at EL2 before setting SRE at EL1 */
		isb();
		write_gicreg(1, ICC_SRE_EL1);
	}
}

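/*
 * World-switch restore path: reload VMCR, the active priority
 * registers and any LR with pending/active state before entering the
 * guest. The APR switches mirror the fall-through pattern used on the
 * save path.
 */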
void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
	u64 val;
	u32 max_lr_idx, nr_pre_bits;
	u16 live_lrs = 0;
	int i;

	/*
	 * VFIQEn is RES1 if ICC_SRE_EL1.SRE is 1. This causes a
	 * Group0 interrupt (as generated in GICv2 mode) to be
	 * delivered as a FIQ to the guest, with potentially fatal
	 * consequences. So we must make sure that ICC_SRE_EL1 has
	 * been actually programmed with the value we want before
	 * starting to mess with the rest of the GIC.
	 */
	if (!cpu_if->vgic_sre) {
		write_gicreg(0, ICC_SRE_EL1);
		isb();
	}

	val = read_gicreg(ICH_VTR_EL2);
	max_lr_idx = vtr_to_max_lr_idx(val);
	nr_pre_bits = vtr_to_nr_pre_bits(val);

	for (i = 0; i <= max_lr_idx; i++) {
		if (cpu_if->vgic_lr[i] & ICH_LR_STATE)
			live_lrs |= (1 << i);
	}

	write_gicreg(cpu_if->vgic_vmcr, ICH_VMCR_EL2);

	if (live_lrs) {
		write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);

		switch (nr_pre_bits) {
		case 7:
			__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[3], 3);
			__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[2], 2);
		case 6:
			__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[1], 1);
		default:
			__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[0], 0);
		}

		switch (nr_pre_bits) {
		case 7:
			__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[3], 3);
			__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[2], 2);
		case 6:
			__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[1], 1);
		default:
			__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[0], 0);
		}

		for (i = 0; i <= max_lr_idx; i++) {
			if (!(live_lrs & (1 << i)))
				continue;

			__gic_v3_set_lr(cpu_if->vgic_lr[i], i);
		}
	} else {
		/*
		 * If we need to trap system registers, we must write
		 * ICH_HCR_EL2 anyway, even if no interrupts are being
		 * injected.
		 */
		if (static_branch_unlikely(&vgic_v3_cpuif_trap))
			write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);
	}

	/*
	 * Ensure that the above will have reached the
	 * (re)distributors. This ensures the guest will read the
	 * correct values from the memory-mapped interface.
	 */
	if (!cpu_if->vgic_sre) {
		isb();
		dsb(sy);
	}
	vcpu->arch.vgic_cpu.live_lrs = live_lrs;

	/*
	 * Prevent the guest from touching the GIC system registers if
	 * SRE isn't enabled for GICv3 emulation.
	 */
	write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE,
		     ICC_SRE_EL2);
}

void __hyp_text __vgic_v3_init_lrs(void)
{
	int max_lr_idx = vtr_to_max_lr_idx(read_gicreg(ICH_VTR_EL2));
	int i;

	for (i = 0; i <= max_lr_idx; i++)
		__gic_v3_set_lr(0, i);
}

u64 __hyp_text __vgic_v3_get_ich_vtr_el2(void)
{
	return read_gicreg(ICH_VTR_EL2);
}

u64 __hyp_text __vgic_v3_read_vmcr(void)
{
	return read_gicreg(ICH_VMCR_EL2);
}

void __hyp_text __vgic_v3_write_vmcr(u32 vmcr)
{
	write_gicreg(vmcr, ICH_VMCR_EL2);
}

#ifdef CONFIG_ARM64

static int __hyp_text __vgic_v3_bpr_min(void)
{
	/* See Pseudocode for VPriorityGroup */
	return 8 - vtr_to_nr_pre_bits(read_gicreg(ICH_VTR_EL2));
}

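/*
 * Distinguish group-0 from group-1 accessors using the trapped
 * encoding: for the registers this helper is used with, the group-0
 * variants (ICC_IAR0_EL1, ICC_EOIR0_EL1, ICC_BPR0_EL1, ICC_AP0Rn_EL1,
 * ICC_HPPIR0_EL1) all have CRm == 8, so anything else is group 1.
 */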
static int __hyp_text __vgic_v3_get_group(struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_hsr(vcpu);
	u8 crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT;

	return crm != 8;
}

#define GICv3_IDLE_PRIORITY	0xff

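/*
 * Scan the in-use LRs for the highest-priority interrupt that is
 * pending (and only pending) and belongs to an enabled group.
 * Returns the LR index and its value, or -1 with a spurious INTID if
 * nothing qualifies.
 */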
static int __hyp_text __vgic_v3_highest_priority_lr(struct kvm_vcpu *vcpu,
						    u32 vmcr,
						    u64 *lr_val)
{
	unsigned int used_lrs = vcpu->arch.vgic_cpu.used_lrs;
	u8 priority = GICv3_IDLE_PRIORITY;
	int i, lr = -1;

	for (i = 0; i < used_lrs; i++) {
		u64 val = __gic_v3_get_lr(i);
		u8 lr_prio = (val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;

		/* Not pending in the state? */
		if ((val & ICH_LR_STATE) != ICH_LR_PENDING_BIT)
			continue;

		/* Group-0 interrupt, but Group-0 disabled? */
		if (!(val & ICH_LR_GROUP) && !(vmcr & ICH_VMCR_ENG0_MASK))
			continue;

		/* Group-1 interrupt, but Group-1 disabled? */
		if ((val & ICH_LR_GROUP) && !(vmcr & ICH_VMCR_ENG1_MASK))
			continue;

		/* Not the highest priority? */
		if (lr_prio >= priority)
			continue;

		/* This is a candidate */
		priority = lr_prio;
		*lr_val = val;
		lr = i;
	}

	if (lr == -1)
		*lr_val = ICC_IAR1_EL1_SPURIOUS;

	return lr;
}

static int __hyp_text __vgic_v3_find_active_lr(struct kvm_vcpu *vcpu,
					       int intid, u64 *lr_val)
{
	unsigned int used_lrs = vcpu->arch.vgic_cpu.used_lrs;
	int i;

	for (i = 0; i < used_lrs; i++) {
		u64 val = __gic_v3_get_lr(i);

		if ((val & ICH_LR_VIRTUAL_ID_MASK) == intid &&
		    (val & ICH_LR_ACTIVE_BIT)) {
			*lr_val = val;
			return i;
		}
	}

	*lr_val = ICC_IAR1_EL1_SPURIOUS;
	return -1;
}

static int __hyp_text __vgic_v3_get_highest_active_priority(void)
{
	u8 nr_apr_regs = vtr_to_nr_apr_regs(read_gicreg(ICH_VTR_EL2));
	u32 hap = 0;
	int i;

	for (i = 0; i < nr_apr_regs; i++) {
		u32 val;

		/*
		 * The ICH_AP0Rn_EL2 and ICH_AP1Rn_EL2 registers
		 * contain the active priority levels for this VCPU
		 * for the maximum number of supported priority
		 * levels, and we return the full priority level only
		 * if the BPR is programmed to its minimum, otherwise
		 * we return a combination of the priority level and
		 * subpriority, as determined by the setting of the
		 * BPR, but without the full subpriority.
		 */
		val = __vgic_v3_read_ap0rn(i);
		val |= __vgic_v3_read_ap1rn(i);
		if (!val) {
			hap += 32;
			continue;
		}

		return (hap + __ffs(val)) << __vgic_v3_bpr_min();
	}

	return GICv3_IDLE_PRIORITY;
}

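/*
 * With ICH_VMCR.CBPR set, BPR1 reads as BPR0 plus one, saturated at 7,
 * and writes to it are ignored; __vgic_v3_get_bpr1() and
 * __vgic_v3_write_bpr1() implement both halves of that behaviour.
 */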
static unsigned int __hyp_text __vgic_v3_get_bpr0(u32 vmcr)
{
	return (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT;
}

static unsigned int __hyp_text __vgic_v3_get_bpr1(u32 vmcr)
{
	unsigned int bpr;

	if (vmcr & ICH_VMCR_CBPR_MASK) {
		bpr = __vgic_v3_get_bpr0(vmcr);
		if (bpr < 7)
			bpr++;
	} else {
		bpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT;
	}

	return bpr;
}

/*
 * Convert a priority to a preemption level, taking the relevant BPR
 * into account by zeroing the sub-priority bits.
 */
static u8 __hyp_text __vgic_v3_pri_to_pre(u8 pri, u32 vmcr, int grp)
{
	unsigned int bpr;

	if (!grp)
		bpr = __vgic_v3_get_bpr0(vmcr) + 1;
	else
		bpr = __vgic_v3_get_bpr1(vmcr);

	return pri & (GENMASK(7, 0) << bpr);
}

/*
 * The priority value is independent of any of the BPR values, so we
 * normalize it using the minimal BPR value. This guarantees that no
 * matter what the guest does with its BPR, we can always set/get the
 * same value of a priority.
 */
static void __hyp_text __vgic_v3_set_active_priority(u8 pri, u32 vmcr, int grp)
{
	u8 pre, ap;
	u32 val;
	int apr;

	pre = __vgic_v3_pri_to_pre(pri, vmcr, grp);
	ap = pre >> __vgic_v3_bpr_min();
	apr = ap / 32;

	if (!grp) {
		val = __vgic_v3_read_ap0rn(apr);
		__vgic_v3_write_ap0rn(val | BIT(ap % 32), apr);
	} else {
		val = __vgic_v3_read_ap1rn(apr);
		__vgic_v3_write_ap1rn(val | BIT(ap % 32), apr);
	}
}

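/*
 * Priority drop: find the highest active priority across the APRs
 * (the lowest set bit, scanning AP0Rn and AP1Rn together), clear it,
 * and return it rescaled to a full 8-bit priority. Returns the idle
 * priority if nothing is active.
 */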
static int __hyp_text __vgic_v3_clear_highest_active_priority(void)
{
	u8 nr_apr_regs = vtr_to_nr_apr_regs(read_gicreg(ICH_VTR_EL2));
	u32 hap = 0;
	int i;

	for (i = 0; i < nr_apr_regs; i++) {
		u32 ap0, ap1;
		int c0, c1;

		ap0 = __vgic_v3_read_ap0rn(i);
		ap1 = __vgic_v3_read_ap1rn(i);
		if (!ap0 && !ap1) {
			hap += 32;
			continue;
		}

		c0 = ap0 ? __ffs(ap0) : 32;
		c1 = ap1 ? __ffs(ap1) : 32;

		/* Always clear the LSB, which is the highest priority */
		if (c0 < c1) {
			ap0 &= ~BIT(c0);
			__vgic_v3_write_ap0rn(ap0, i);
			hap += c0;
		} else {
			ap1 &= ~BIT(c1);
			__vgic_v3_write_ap1rn(ap1, i);
			hap += c1;
		}

		/* Rescale to 8 bits of priority */
		return hap << __vgic_v3_bpr_min();
	}

	return GICv3_IDLE_PRIORITY;
}

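/*
 * ICC_IARn_EL1 emulation: pick the highest-priority pending LR and,
 * if it passes the group, PMR and preemption checks, mark it active,
 * set its active-priority bit and return its INTID; otherwise return
 * a spurious INTID.
 */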
static void __hyp_text __vgic_v3_read_iar(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 lr_val;
	u8 lr_prio, pmr;
	int lr, grp;

	grp = __vgic_v3_get_group(vcpu);

	lr = __vgic_v3_highest_priority_lr(vcpu, vmcr, &lr_val);
	if (lr < 0)
		goto spurious;

	if (grp != !!(lr_val & ICH_LR_GROUP))
		goto spurious;

	pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT;
	lr_prio = (lr_val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;
	if (pmr <= lr_prio)
		goto spurious;

	if (__vgic_v3_get_highest_active_priority() <= __vgic_v3_pri_to_pre(lr_prio, vmcr, grp))
		goto spurious;

	lr_val &= ~ICH_LR_STATE;
	/* No active state for LPIs */
	if ((lr_val & ICH_LR_VIRTUAL_ID_MASK) <= VGIC_MAX_SPI)
		lr_val |= ICH_LR_ACTIVE_BIT;
	__gic_v3_set_lr(lr_val, lr);
	__vgic_v3_set_active_priority(lr_prio, vmcr, grp);
	vcpu_set_reg(vcpu, rt, lr_val & ICH_LR_VIRTUAL_ID_MASK);
	return;

spurious:
	vcpu_set_reg(vcpu, rt, ICC_IAR1_EL1_SPURIOUS);
}

static void __hyp_text __vgic_v3_clear_active_lr(int lr, u64 lr_val)
{
	lr_val &= ~ICH_LR_ACTIVE_BIT;
	if (lr_val & ICH_LR_HW) {
		u32 pid;

		pid = (lr_val & ICH_LR_PHYS_ID_MASK) >> ICH_LR_PHYS_ID_SHIFT;
		gic_write_dir(pid);
	}

	__gic_v3_set_lr(lr_val, lr);
}

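/*
 * No LR matched the interrupt being deactivated: record the stray
 * EOI by bumping ICH_HCR_EL2.EOIcount, mirroring what the GIC does
 * when it receives an EOI with no corresponding List Register.
 */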
static void __hyp_text __vgic_v3_bump_eoicount(void)
{
	u32 hcr;

	hcr = read_gicreg(ICH_HCR_EL2);
	hcr += 1 << ICH_HCR_EOIcount_SHIFT;
	write_gicreg(hcr, ICH_HCR_EL2);
}

static void __hyp_text __vgic_v3_write_dir(struct kvm_vcpu *vcpu,
					   u32 vmcr, int rt)
{
	u32 vid = vcpu_get_reg(vcpu, rt);
	u64 lr_val;
	int lr;

	/* EOImode == 0, nothing to be done here */
	if (!(vmcr & ICH_VMCR_EOIM_MASK))
		return;

	/* No deactivate to be performed on an LPI */
	if (vid >= VGIC_MIN_LPI)
		return;

	lr = __vgic_v3_find_active_lr(vcpu, vid, &lr_val);
	if (lr == -1) {
		__vgic_v3_bump_eoicount();
		return;
	}

	__vgic_v3_clear_active_lr(lr, lr_val);
}

static void __hyp_text __vgic_v3_write_eoir(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u32 vid = vcpu_get_reg(vcpu, rt);
	u64 lr_val;
	u8 lr_prio, act_prio;
	int lr, grp;

	grp = __vgic_v3_get_group(vcpu);

	/* Drop priority in any case */
	act_prio = __vgic_v3_clear_highest_active_priority();

	/* If EOIing an LPI, no deactivate to be performed */
	if (vid >= VGIC_MIN_LPI)
		return;

	/* EOImode == 1, nothing to be done here */
	if (vmcr & ICH_VMCR_EOIM_MASK)
		return;

	lr = __vgic_v3_find_active_lr(vcpu, vid, &lr_val);
	if (lr == -1) {
		__vgic_v3_bump_eoicount();
		return;
	}

	lr_prio = (lr_val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;

	/* If priorities or group do not match, the guest has fscked-up. */
	if (grp != !!(lr_val & ICH_LR_GROUP) ||
	    __vgic_v3_pri_to_pre(lr_prio, vmcr, grp) != act_prio)
		return;

	/* Let's now perform the deactivation */
	__vgic_v3_clear_active_lr(lr, lr_val);
}

static void __hyp_text __vgic_v3_read_igrpen0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	vcpu_set_reg(vcpu, rt, !!(vmcr & ICH_VMCR_ENG0_MASK));
}

static void __hyp_text __vgic_v3_read_igrpen1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	vcpu_set_reg(vcpu, rt, !!(vmcr & ICH_VMCR_ENG1_MASK));
}

static void __hyp_text __vgic_v3_write_igrpen0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 val = vcpu_get_reg(vcpu, rt);

	if (val & 1)
		vmcr |= ICH_VMCR_ENG0_MASK;
	else
		vmcr &= ~ICH_VMCR_ENG0_MASK;

	__vgic_v3_write_vmcr(vmcr);
}

static void __hyp_text __vgic_v3_write_igrpen1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 val = vcpu_get_reg(vcpu, rt);

	if (val & 1)
		vmcr |= ICH_VMCR_ENG1_MASK;
	else
		vmcr &= ~ICH_VMCR_ENG1_MASK;

	__vgic_v3_write_vmcr(vmcr);
}

static void __hyp_text __vgic_v3_read_bpr0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	vcpu_set_reg(vcpu, rt, __vgic_v3_get_bpr0(vmcr));
}

static void __hyp_text __vgic_v3_read_bpr1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	vcpu_set_reg(vcpu, rt, __vgic_v3_get_bpr1(vmcr));
}

static void __hyp_text __vgic_v3_write_bpr0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 val = vcpu_get_reg(vcpu, rt);
	u8 bpr_min = __vgic_v3_bpr_min() - 1;

	/* Enforce BPR limiting */
	if (val < bpr_min)
		val = bpr_min;

	val <<= ICH_VMCR_BPR0_SHIFT;
	val &= ICH_VMCR_BPR0_MASK;
	vmcr &= ~ICH_VMCR_BPR0_MASK;
	vmcr |= val;

	__vgic_v3_write_vmcr(vmcr);
}

static void __hyp_text __vgic_v3_write_bpr1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 val = vcpu_get_reg(vcpu, rt);
	u8 bpr_min = __vgic_v3_bpr_min();

	if (vmcr & ICH_VMCR_CBPR_MASK)
		return;

	/* Enforce BPR limiting */
	if (val < bpr_min)
		val = bpr_min;

	val <<= ICH_VMCR_BPR1_SHIFT;
	val &= ICH_VMCR_BPR1_MASK;
	vmcr &= ~ICH_VMCR_BPR1_MASK;
	vmcr |= val;

	__vgic_v3_write_vmcr(vmcr);
}

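/*
 * ICC_AP0Rn_EL1 and ICC_AP1Rn_EL1 share trap handlers: the trapped
 * encoding (see __vgic_v3_get_group()) selects whether the group-0 or
 * group-1 register is accessed.
 */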
static void __hyp_text __vgic_v3_read_apxrn(struct kvm_vcpu *vcpu, int rt, int n)
{
	u32 val;

	if (!__vgic_v3_get_group(vcpu))
		val = __vgic_v3_read_ap0rn(n);
	else
		val = __vgic_v3_read_ap1rn(n);

	vcpu_set_reg(vcpu, rt, val);
}

static void __hyp_text __vgic_v3_write_apxrn(struct kvm_vcpu *vcpu, int rt, int n)
{
	u32 val = vcpu_get_reg(vcpu, rt);

	if (!__vgic_v3_get_group(vcpu))
		__vgic_v3_write_ap0rn(val, n);
	else
		__vgic_v3_write_ap1rn(val, n);
}

static void __hyp_text __vgic_v3_read_apxr0(struct kvm_vcpu *vcpu,
					    u32 vmcr, int rt)
{
	__vgic_v3_read_apxrn(vcpu, rt, 0);
}

static void __hyp_text __vgic_v3_read_apxr1(struct kvm_vcpu *vcpu,
					    u32 vmcr, int rt)
{
	__vgic_v3_read_apxrn(vcpu, rt, 1);
}

static void __hyp_text __vgic_v3_read_apxr2(struct kvm_vcpu *vcpu,
					    u32 vmcr, int rt)
{
	__vgic_v3_read_apxrn(vcpu, rt, 2);
}

static void __hyp_text __vgic_v3_read_apxr3(struct kvm_vcpu *vcpu,
					    u32 vmcr, int rt)
{
	__vgic_v3_read_apxrn(vcpu, rt, 3);
}

static void __hyp_text __vgic_v3_write_apxr0(struct kvm_vcpu *vcpu,
					     u32 vmcr, int rt)
{
	__vgic_v3_write_apxrn(vcpu, rt, 0);
}

static void __hyp_text __vgic_v3_write_apxr1(struct kvm_vcpu *vcpu,
					     u32 vmcr, int rt)
{
	__vgic_v3_write_apxrn(vcpu, rt, 1);
}

static void __hyp_text __vgic_v3_write_apxr2(struct kvm_vcpu *vcpu,
					     u32 vmcr, int rt)
{
	__vgic_v3_write_apxrn(vcpu, rt, 2);
}

static void __hyp_text __vgic_v3_write_apxr3(struct kvm_vcpu *vcpu,
					     u32 vmcr, int rt)
{
	__vgic_v3_write_apxrn(vcpu, rt, 3);
}

static void __hyp_text __vgic_v3_read_hppir(struct kvm_vcpu *vcpu,
					    u32 vmcr, int rt)
{
	u64 lr_val;
	int lr, lr_grp, grp;

	grp = __vgic_v3_get_group(vcpu);

	lr = __vgic_v3_highest_priority_lr(vcpu, vmcr, &lr_val);
	if (lr == -1)
		goto spurious;

	lr_grp = !!(lr_val & ICH_LR_GROUP);
	if (lr_grp != grp)
		lr_val = ICC_IAR1_EL1_SPURIOUS;

spurious:
	vcpu_set_reg(vcpu, rt, lr_val & ICH_LR_VIRTUAL_ID_MASK);
}

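/*
 * Main trap handler for guest accesses to the GICv3 CPU interface
 * system registers. Decode the trapped register from the ESR, run the
 * matching emulation handler against a snapshot of ICH_VMCR_EL2, and
 * return 1 if the access was emulated, 0 if the register is not
 * handled here.
 */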
int __hyp_text __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu)
{
	int rt;
	u32 esr;
	u32 vmcr;
	void (*fn)(struct kvm_vcpu *, u32, int);
	bool is_read;
	u32 sysreg;

	esr = kvm_vcpu_get_hsr(vcpu);
	if (vcpu_mode_is_32bit(vcpu)) {
		if (!kvm_condition_valid(vcpu))
			return 1;

		sysreg = esr_cp15_to_sysreg(esr);
	} else {
		sysreg = esr_sys64_to_sysreg(esr);
	}

	is_read = (esr & ESR_ELx_SYS64_ISS_DIR_MASK) == ESR_ELx_SYS64_ISS_DIR_READ;

	switch (sysreg) {
	case ICC_IAR0_EL1:
	case ICC_IAR1_EL1:
		fn = __vgic_v3_read_iar;
		break;
	case ICC_EOIR0_EL1:
	case ICC_EOIR1_EL1:
		fn = __vgic_v3_write_eoir;
		break;
	case ICC_GRPEN1_EL1:
		if (is_read)
			fn = __vgic_v3_read_igrpen1;
		else
			fn = __vgic_v3_write_igrpen1;
		break;
	case ICC_BPR1_EL1:
		if (is_read)
			fn = __vgic_v3_read_bpr1;
		else
			fn = __vgic_v3_write_bpr1;
		break;
	case ICC_AP0Rn_EL1(0):
	case ICC_AP1Rn_EL1(0):
		if (is_read)
			fn = __vgic_v3_read_apxr0;
		else
			fn = __vgic_v3_write_apxr0;
		break;
	case ICC_AP0Rn_EL1(1):
	case ICC_AP1Rn_EL1(1):
		if (is_read)
			fn = __vgic_v3_read_apxr1;
		else
			fn = __vgic_v3_write_apxr1;
		break;
	case ICC_AP0Rn_EL1(2):
	case ICC_AP1Rn_EL1(2):
		if (is_read)
			fn = __vgic_v3_read_apxr2;
		else
			fn = __vgic_v3_write_apxr2;
		break;
	case ICC_AP0Rn_EL1(3):
	case ICC_AP1Rn_EL1(3):
		if (is_read)
			fn = __vgic_v3_read_apxr3;
		else
			fn = __vgic_v3_write_apxr3;
		break;
	case ICC_HPPIR0_EL1:
	case ICC_HPPIR1_EL1:
		fn = __vgic_v3_read_hppir;
		break;
	case ICC_GRPEN0_EL1:
		if (is_read)
			fn = __vgic_v3_read_igrpen0;
		else
			fn = __vgic_v3_write_igrpen0;
		break;
	case ICC_BPR0_EL1:
		if (is_read)
			fn = __vgic_v3_read_bpr0;
		else
			fn = __vgic_v3_write_bpr0;
		break;
	case ICC_DIR_EL1:
		fn = __vgic_v3_write_dir;
		break;
	default:
		return 0;
	}

	vmcr = __vgic_v3_read_vmcr();
	rt = kvm_vcpu_sys_get_rt(vcpu);
	fn(vcpu, vmcr, rt);

	return 1;
}

#endif