virt/kvm/arm/hyp/vgic-v3-sr.c
KVM: arm/arm64: vgic-v3: Use PREbits to infer the number of ICH_APxRn_EL2 registers
/*
 * Copyright (C) 2012-2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/compiler.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/kvm_host.h>

#include <asm/kvm_hyp.h>

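/*
 * ICH_VTR_EL2 describes the GIC virtualization extensions: ListRegs,
 * bits [4:0], holds the number of implemented List registers minus one,
 * and PREbits, bits [31:26], holds the number of virtual preemption
 * bits minus one. The helpers below extract both values.
 */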
#define vtr_to_max_lr_idx(v)	((v) & 0xf)
#define vtr_to_nr_pre_bits(v)	(((u32)(v) >> 26) + 1)

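/*
 * The ICH_LR<n>_EL2 registers are distinct system registers whose
 * encodings must be known at compile time, so the accessors below
 * dispatch on the List register index with a switch instead of
 * computing the register name at run time. Both run in the __hyp_text
 * section, i.e. at EL2.
 */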
static u64 __hyp_text __gic_v3_get_lr(unsigned int lr)
{
	switch (lr & 0xf) {
	case 0:
		return read_gicreg(ICH_LR0_EL2);
	case 1:
		return read_gicreg(ICH_LR1_EL2);
	case 2:
		return read_gicreg(ICH_LR2_EL2);
	case 3:
		return read_gicreg(ICH_LR3_EL2);
	case 4:
		return read_gicreg(ICH_LR4_EL2);
	case 5:
		return read_gicreg(ICH_LR5_EL2);
	case 6:
		return read_gicreg(ICH_LR6_EL2);
	case 7:
		return read_gicreg(ICH_LR7_EL2);
	case 8:
		return read_gicreg(ICH_LR8_EL2);
	case 9:
		return read_gicreg(ICH_LR9_EL2);
	case 10:
		return read_gicreg(ICH_LR10_EL2);
	case 11:
		return read_gicreg(ICH_LR11_EL2);
	case 12:
		return read_gicreg(ICH_LR12_EL2);
	case 13:
		return read_gicreg(ICH_LR13_EL2);
	case 14:
		return read_gicreg(ICH_LR14_EL2);
	case 15:
		return read_gicreg(ICH_LR15_EL2);
	}

	unreachable();
}

static void __hyp_text __gic_v3_set_lr(u64 val, int lr)
{
	switch (lr & 0xf) {
	case 0:
		write_gicreg(val, ICH_LR0_EL2);
		break;
	case 1:
		write_gicreg(val, ICH_LR1_EL2);
		break;
	case 2:
		write_gicreg(val, ICH_LR2_EL2);
		break;
	case 3:
		write_gicreg(val, ICH_LR3_EL2);
		break;
	case 4:
		write_gicreg(val, ICH_LR4_EL2);
		break;
	case 5:
		write_gicreg(val, ICH_LR5_EL2);
		break;
	case 6:
		write_gicreg(val, ICH_LR6_EL2);
		break;
	case 7:
		write_gicreg(val, ICH_LR7_EL2);
		break;
	case 8:
		write_gicreg(val, ICH_LR8_EL2);
		break;
	case 9:
		write_gicreg(val, ICH_LR9_EL2);
		break;
	case 10:
		write_gicreg(val, ICH_LR10_EL2);
		break;
	case 11:
		write_gicreg(val, ICH_LR11_EL2);
		break;
	case 12:
		write_gicreg(val, ICH_LR12_EL2);
		break;
	case 13:
		write_gicreg(val, ICH_LR13_EL2);
		break;
	case 14:
		write_gicreg(val, ICH_LR14_EL2);
		break;
	case 15:
		write_gicreg(val, ICH_LR15_EL2);
		break;
	}
}

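/*
 * Save the guest's view of the GICv3 CPU interface on VM exit: the
 * List registers and the active priority registers (ICH_APxRn_EL2)
 * are copied into the shadow state in cpu_if, ICH_HCR_EL2 is cleared
 * and the List registers are zeroed so that no live guest state is
 * left in the hardware. With no List registers in use, the shadow
 * state is simply reset to its default values.
 */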
void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
	u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
	u64 val;

	/*
	 * Make sure stores to the GIC via the memory mapped interface
	 * are now visible to the system register interface.
	 */
	if (!cpu_if->vgic_sre) {
		dsb(st);
		cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2);
	}

	if (used_lrs) {
		int i;
		u32 nr_pre_bits;

		cpu_if->vgic_elrsr = read_gicreg(ICH_ELSR_EL2);

		write_gicreg(0, ICH_HCR_EL2);
		val = read_gicreg(ICH_VTR_EL2);
		nr_pre_bits = vtr_to_nr_pre_bits(val);

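		/*
		 * An LR flagged in ELRSR is empty: only its state bits need
		 * to be cleared in the shadow copy, which avoids a (costly)
		 * read of the corresponding ICH_LRn_EL2 register.
		 */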
		for (i = 0; i < used_lrs; i++) {
			if (cpu_if->vgic_elrsr & (1 << i))
				cpu_if->vgic_lr[i] &= ~ICH_LR_STATE;
			else
				cpu_if->vgic_lr[i] = __gic_v3_get_lr(i);

			__gic_v3_set_lr(0, i);
		}

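		/*
		 * The number of implemented ICH_APxRn_EL2 registers depends
		 * on the number of preemption bits: 4 registers for 7 bits,
		 * 2 for 6 bits, and a single one otherwise. The switches
		 * below deliberately fall through to save all of them.
		 */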
		switch (nr_pre_bits) {
		case 7:
			cpu_if->vgic_ap0r[3] = read_gicreg(ICH_AP0R3_EL2);
			cpu_if->vgic_ap0r[2] = read_gicreg(ICH_AP0R2_EL2);
		case 6:
			cpu_if->vgic_ap0r[1] = read_gicreg(ICH_AP0R1_EL2);
		default:
			cpu_if->vgic_ap0r[0] = read_gicreg(ICH_AP0R0_EL2);
		}

		switch (nr_pre_bits) {
		case 7:
			cpu_if->vgic_ap1r[3] = read_gicreg(ICH_AP1R3_EL2);
			cpu_if->vgic_ap1r[2] = read_gicreg(ICH_AP1R2_EL2);
		case 6:
			cpu_if->vgic_ap1r[1] = read_gicreg(ICH_AP1R1_EL2);
		default:
			cpu_if->vgic_ap1r[0] = read_gicreg(ICH_AP1R0_EL2);
		}
	} else {
		cpu_if->vgic_elrsr = 0xffff;
		cpu_if->vgic_ap0r[0] = 0;
		cpu_if->vgic_ap0r[1] = 0;
		cpu_if->vgic_ap0r[2] = 0;
		cpu_if->vgic_ap0r[3] = 0;
		cpu_if->vgic_ap1r[0] = 0;
		cpu_if->vgic_ap1r[1] = 0;
		cpu_if->vgic_ap1r[2] = 0;
		cpu_if->vgic_ap1r[3] = 0;
	}

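	/*
	 * Give the host back its system register interface: set
	 * ICC_SRE_EL2.Enable so that EL1 accesses to ICC_SRE_EL1 no longer
	 * trap, and re-enable SRE at EL1 if the guest was using the
	 * memory-mapped interface.
	 */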
	val = read_gicreg(ICC_SRE_EL2);
	write_gicreg(val | ICC_SRE_EL2_ENABLE, ICC_SRE_EL2);

	if (!cpu_if->vgic_sre) {
		/* Make sure ENABLE is set at EL2 before setting SRE at EL1 */
		isb();
		write_gicreg(1, ICC_SRE_EL1);
	}
}

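/*
 * Restore the guest's view of the GICv3 CPU interface on VM entry,
 * mirroring __vgic_v3_save_state: the active priority registers and
 * the List registers are written back from the shadow state, and
 * ICC_SRE_EL2.Enable is cleared so that guest accesses to ICC_SRE_EL1
 * trap to the hypervisor.
 */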
void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
	u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
	u64 val;
	u32 nr_pre_bits;
	int i;

	/*
	 * VFIQEn is RES1 if ICC_SRE_EL1.SRE is 1. This causes a
	 * Group0 interrupt (as generated in GICv2 mode) to be
	 * delivered as a FIQ to the guest, with potentially fatal
	 * consequences. So we must make sure that ICC_SRE_EL1 has
	 * actually been programmed with the value we want before
	 * starting to mess with the rest of the GIC, and VMCR_EL2 in
	 * particular.
	 */
	if (!cpu_if->vgic_sre) {
		write_gicreg(0, ICC_SRE_EL1);
		isb();
		write_gicreg(cpu_if->vgic_vmcr, ICH_VMCR_EL2);
	}

	val = read_gicreg(ICH_VTR_EL2);
	nr_pre_bits = vtr_to_nr_pre_bits(val);

	if (used_lrs) {
		write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);

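		/*
		 * As on the save path, the number of live ICH_APxRn_EL2
		 * registers follows from nr_pre_bits; the fall-through is
		 * intentional.
		 */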
		switch (nr_pre_bits) {
		case 7:
			write_gicreg(cpu_if->vgic_ap0r[3], ICH_AP0R3_EL2);
			write_gicreg(cpu_if->vgic_ap0r[2], ICH_AP0R2_EL2);
		case 6:
			write_gicreg(cpu_if->vgic_ap0r[1], ICH_AP0R1_EL2);
		default:
			write_gicreg(cpu_if->vgic_ap0r[0], ICH_AP0R0_EL2);
		}

		switch (nr_pre_bits) {
		case 7:
			write_gicreg(cpu_if->vgic_ap1r[3], ICH_AP1R3_EL2);
			write_gicreg(cpu_if->vgic_ap1r[2], ICH_AP1R2_EL2);
		case 6:
			write_gicreg(cpu_if->vgic_ap1r[1], ICH_AP1R1_EL2);
		default:
			write_gicreg(cpu_if->vgic_ap1r[0], ICH_AP1R0_EL2);
		}

		for (i = 0; i < used_lrs; i++)
			__gic_v3_set_lr(cpu_if->vgic_lr[i], i);
	}

	/*
	 * Ensures that the above will have reached the
	 * (re)distributors. This ensures the guest will read the
	 * correct values from the memory-mapped interface.
	 */
	if (!cpu_if->vgic_sre) {
		isb();
		dsb(sy);
	}

	/*
	 * Prevent the guest from touching the GIC system registers if
	 * SRE isn't enabled for GICv3 emulation.
	 */
	write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE,
		     ICC_SRE_EL2);
}

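/*
 * Zero every implemented List register so that the hypervisor starts
 * from a known, empty LR state.
 */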
void __hyp_text __vgic_v3_init_lrs(void)
{
	int max_lr_idx = vtr_to_max_lr_idx(read_gicreg(ICH_VTR_EL2));
	int i;

	for (i = 0; i <= max_lr_idx; i++)
		__gic_v3_set_lr(0, i);
}

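/*
 * ICH_VTR_EL2 and ICH_VMCR_EL2 are only accessible at EL2; callers
 * elsewhere in KVM are expected to reach these thin wrappers through a
 * hyp call (e.g. kvm_call_hyp()).
 */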
u64 __hyp_text __vgic_v3_get_ich_vtr_el2(void)
{
	return read_gicreg(ICH_VTR_EL2);
}

u64 __hyp_text __vgic_v3_read_vmcr(void)
{
	return read_gicreg(ICH_VMCR_EL2);
}

void __hyp_text __vgic_v3_write_vmcr(u32 vmcr)
{
	write_gicreg(vmcr, ICH_VMCR_EL2);
}