/*
 * ARM GICv3 support - common bits of emulated and KVM kernel model
 *
 * Copyright (c) 2012 Linaro Limited
 * Copyright (c) 2015 Huawei.
 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
 * Written by Peter Maydell
 * Reworked for GICv3 by Shlomo Pongratz and Pavel Fedin
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/module.h"
#include "hw/core/cpu.h"
#include "hw/intc/arm_gicv3_common.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
#include "gicv3_internal.h"
#include "hw/arm/linux-boot-if.h"
#include "sysemu/kvm.h"

static void gicv3_gicd_no_migration_shift_bug_post_load(GICv3State *cs)
{
    if (cs->gicd_no_migration_shift_bug) {
        return;
    }

    /* Older versions of QEMU had a bug in the handling of state save/restore
     * to the KVM GICv3: they got the offset in the bitmap arrays wrong,
     * so that instead of the data for external interrupts 32 and up
     * starting at bit position 32 in the bitmap, it started at bit
     * position 64. If we're receiving data from a QEMU with that bug,
     * we must move the data down into the right place.
     */
    memmove(cs->group, (uint8_t *)cs->group + GIC_INTERNAL / 8,
            sizeof(cs->group) - GIC_INTERNAL / 8);
    memmove(cs->grpmod, (uint8_t *)cs->grpmod + GIC_INTERNAL / 8,
            sizeof(cs->grpmod) - GIC_INTERNAL / 8);
    memmove(cs->enabled, (uint8_t *)cs->enabled + GIC_INTERNAL / 8,
            sizeof(cs->enabled) - GIC_INTERNAL / 8);
    memmove(cs->pending, (uint8_t *)cs->pending + GIC_INTERNAL / 8,
            sizeof(cs->pending) - GIC_INTERNAL / 8);
    memmove(cs->active, (uint8_t *)cs->active + GIC_INTERNAL / 8,
            sizeof(cs->active) - GIC_INTERNAL / 8);
    memmove(cs->edge_trigger, (uint8_t *)cs->edge_trigger + GIC_INTERNAL / 8,
            sizeof(cs->edge_trigger) - GIC_INTERNAL / 8);

    /*
     * This version of QEMU does not have the bug, so after shifting the
     * data into place we set the flag to record that fact; this is needed
     * for a subsequent outgoing migration from this QEMU to be handled
     * correctly.
     */
    cs->gicd_no_migration_shift_bug = true;
}

static int gicv3_pre_save(void *opaque)
{
    GICv3State *s = (GICv3State *)opaque;
    ARMGICv3CommonClass *c = ARM_GICV3_COMMON_GET_CLASS(s);

    if (c->pre_save) {
        c->pre_save(s);
    }

    return 0;
}

static int gicv3_post_load(void *opaque, int version_id)
{
    GICv3State *s = (GICv3State *)opaque;
    ARMGICv3CommonClass *c = ARM_GICV3_COMMON_GET_CLASS(s);

    gicv3_gicd_no_migration_shift_bug_post_load(s);

    if (c->post_load) {
        c->post_load(s);
    }
    return 0;
}

static bool virt_state_needed(void *opaque)
{
    GICv3CPUState *cs = opaque;

    return cs->num_list_regs != 0;
}

static const VMStateDescription vmstate_gicv3_cpu_virt = {
    .name = "arm_gicv3_cpu/virt",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = virt_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_2DARRAY(ich_apr, GICv3CPUState, 3, 4),
        VMSTATE_UINT64(ich_hcr_el2, GICv3CPUState),
        VMSTATE_UINT64_ARRAY(ich_lr_el2, GICv3CPUState, GICV3_LR_MAX),
        VMSTATE_UINT64(ich_vmcr_el2, GICv3CPUState),
        VMSTATE_END_OF_LIST()
    }
};

static int vmstate_gicv3_cpu_pre_load(void *opaque)
{
    GICv3CPUState *cs = opaque;

    /*
     * If the sre_el1 subsection is not transferred this
     * means SRE_EL1 is 0x7 (which might not be the same as
     * our reset value).
     */
    cs->icc_sre_el1 = 0x7;
    return 0;
}

static bool icc_sre_el1_reg_needed(void *opaque)
{
    GICv3CPUState *cs = opaque;

    return cs->icc_sre_el1 != 7;
}

const VMStateDescription vmstate_gicv3_cpu_sre_el1 = {
    .name = "arm_gicv3_cpu/sre_el1",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = icc_sre_el1_reg_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(icc_sre_el1, GICv3CPUState),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_gicv3_cpu = {
    .name = "arm_gicv3_cpu",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = vmstate_gicv3_cpu_pre_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(level, GICv3CPUState),
        VMSTATE_UINT32(gicr_ctlr, GICv3CPUState),
        VMSTATE_UINT32_ARRAY(gicr_statusr, GICv3CPUState, 2),
        VMSTATE_UINT32(gicr_waker, GICv3CPUState),
        VMSTATE_UINT64(gicr_propbaser, GICv3CPUState),
        VMSTATE_UINT64(gicr_pendbaser, GICv3CPUState),
        VMSTATE_UINT32(gicr_igroupr0, GICv3CPUState),
        VMSTATE_UINT32(gicr_ienabler0, GICv3CPUState),
        VMSTATE_UINT32(gicr_ipendr0, GICv3CPUState),
        VMSTATE_UINT32(gicr_iactiver0, GICv3CPUState),
        VMSTATE_UINT32(edge_trigger, GICv3CPUState),
        VMSTATE_UINT32(gicr_igrpmodr0, GICv3CPUState),
        VMSTATE_UINT32(gicr_nsacr, GICv3CPUState),
        VMSTATE_UINT8_ARRAY(gicr_ipriorityr, GICv3CPUState, GIC_INTERNAL),
        VMSTATE_UINT64_ARRAY(icc_ctlr_el1, GICv3CPUState, 2),
        VMSTATE_UINT64(icc_pmr_el1, GICv3CPUState),
        VMSTATE_UINT64_ARRAY(icc_bpr, GICv3CPUState, 3),
        VMSTATE_UINT64_2DARRAY(icc_apr, GICv3CPUState, 3, 4),
        VMSTATE_UINT64_ARRAY(icc_igrpen, GICv3CPUState, 3),
        VMSTATE_UINT64(icc_ctlr_el3, GICv3CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * []) {
        &vmstate_gicv3_cpu_virt,
        &vmstate_gicv3_cpu_sre_el1,
        NULL
    }
};

static int gicv3_pre_load(void *opaque)
{
    GICv3State *cs = opaque;

    /*
     * The gicd_no_migration_shift_bug flag provides migration compatibility
     * with older versions of QEMU which had the GICD bitmap shift bug when
     * running under KVM. Strictly, what we want to know is whether the
     * migration source is using KVM. Since we don't have any way to
     * determine that, we look at whether the destination is using KVM;
     * this is close enough because for the older QEMU versions with this
     * bug KVM -> TCG migration didn't work anyway. If the source is a newer
     * QEMU without this bug it will transmit the migration subsection which
     * sets the flag to true; otherwise it will remain set to the value we
     * select here.
     */
    if (kvm_enabled()) {
        cs->gicd_no_migration_shift_bug = false;
    }

    return 0;
}

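/*
 * This subsection is always transmitted, so a migration source new enough
 * to have it always tells the destination whether its GICD bitmap data is
 * free of the shift bug (see gicv3_pre_load() above).
 */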
static bool needed_always(void *opaque)
{
    return true;
}

const VMStateDescription vmstate_gicv3_gicd_no_migration_shift_bug = {
    .name = "arm_gicv3/gicd_no_migration_shift_bug",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = needed_always,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(gicd_no_migration_shift_bug, GICv3State),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_gicv3 = {
    .name = "arm_gicv3",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = gicv3_pre_load,
    .pre_save = gicv3_pre_save,
    .post_load = gicv3_post_load,
    .priority = MIG_PRI_GICV3,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(gicd_ctlr, GICv3State),
        VMSTATE_UINT32_ARRAY(gicd_statusr, GICv3State, 2),
        VMSTATE_UINT32_ARRAY(group, GICv3State, GICV3_BMP_SIZE),
        VMSTATE_UINT32_ARRAY(grpmod, GICv3State, GICV3_BMP_SIZE),
        VMSTATE_UINT32_ARRAY(enabled, GICv3State, GICV3_BMP_SIZE),
        VMSTATE_UINT32_ARRAY(pending, GICv3State, GICV3_BMP_SIZE),
        VMSTATE_UINT32_ARRAY(active, GICv3State, GICV3_BMP_SIZE),
        VMSTATE_UINT32_ARRAY(level, GICv3State, GICV3_BMP_SIZE),
        VMSTATE_UINT32_ARRAY(edge_trigger, GICv3State, GICV3_BMP_SIZE),
        VMSTATE_UINT8_ARRAY(gicd_ipriority, GICv3State, GICV3_MAXIRQ),
        VMSTATE_UINT64_ARRAY(gicd_irouter, GICv3State, GICV3_MAXIRQ),
        VMSTATE_UINT32_ARRAY(gicd_nsacr, GICv3State,
                             DIV_ROUND_UP(GICV3_MAXIRQ, 16)),
        VMSTATE_STRUCT_VARRAY_POINTER_UINT32(cpu, GICv3State, num_cpu,
                                             vmstate_gicv3_cpu, GICv3CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * []) {
        &vmstate_gicv3_gicd_no_migration_shift_bug,
        NULL
    }
};

void gicv3_init_irqs_and_mmio(GICv3State *s, qemu_irq_handler handler,
                              const MemoryRegionOps *ops)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(s);
    int i;
    int cpuidx;

    /* For the GIC, also expose incoming GPIO lines for PPIs for each CPU.
     * GPIO array layout is thus:
     *  [0..N-1] spi
     *  [N..N+31] PPIs for CPU 0
     *  [N+32..N+63] PPIs for CPU 1
     *   ...
     */
    i = s->num_irq - GIC_INTERNAL + GIC_INTERNAL * s->num_cpu;
    qdev_init_gpio_in(DEVICE(s), handler, i);

    for (i = 0; i < s->num_cpu; i++) {
        sysbus_init_irq(sbd, &s->cpu[i].parent_irq);
    }
    for (i = 0; i < s->num_cpu; i++) {
        sysbus_init_irq(sbd, &s->cpu[i].parent_fiq);
    }
    for (i = 0; i < s->num_cpu; i++) {
        sysbus_init_irq(sbd, &s->cpu[i].parent_virq);
    }
    for (i = 0; i < s->num_cpu; i++) {
        sysbus_init_irq(sbd, &s->cpu[i].parent_vfiq);
    }

    memory_region_init_io(&s->iomem_dist, OBJECT(s), ops, s,
                          "gicv3_dist", 0x10000);
    sysbus_init_mmio(sbd, &s->iomem_dist);

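    /* Each redistributor region gets its own MMIO region, sized to hold
     * the redistributor frames of the CPUs assigned to it.
     */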
    s->redist_regions = g_new0(GICv3RedistRegion, s->nb_redist_regions);
    cpuidx = 0;
    for (i = 0; i < s->nb_redist_regions; i++) {
        char *name = g_strdup_printf("gicv3_redist_region[%d]", i);
        GICv3RedistRegion *region = &s->redist_regions[i];

        region->gic = s;
        region->cpuidx = cpuidx;
        cpuidx += s->redist_region_count[i];

        memory_region_init_io(&region->iomem, OBJECT(s),
                              ops ? &ops[1] : NULL, region, name,
                              s->redist_region_count[i] * gicv3_redist_size(s));
        sysbus_init_mmio(sbd, &region->iomem);
        g_free(name);
    }
}

static void arm_gicv3_common_realize(DeviceState *dev, Error **errp)
{
    GICv3State *s = ARM_GICV3_COMMON(dev);
    int i, rdist_capacity, cpuidx;

    /* revision property is actually reserved and currently used only in order
     * to keep the interface compatible with GICv2 code, avoiding extra
     * conditions. However, in future it could be used, for example, if we
     * implement GICv4.
     */
    if (s->revision != 3) {
        error_setg(errp, "unsupported GIC revision %d", s->revision);
        return;
    }

    if (s->num_irq > GICV3_MAXIRQ) {
        error_setg(errp,
                   "requested %u interrupt lines exceeds GIC maximum %d",
                   s->num_irq, GICV3_MAXIRQ);
        return;
    }
    if (s->num_irq < GIC_INTERNAL) {
        error_setg(errp,
                   "requested %u interrupt lines is below GIC minimum %d",
                   s->num_irq, GIC_INTERNAL);
        return;
    }
    if (s->num_cpu == 0) {
        error_setg(errp, "num-cpu must be at least 1");
        return;
    }

    /* ITLinesNumber is represented as (N / 32) - 1, so this is an
     * implementation imposed restriction, not an architectural one,
     * so we don't have to deal with bitfields where only some of the
     * bits in a 32-bit word should be valid.
     */
    if (s->num_irq % 32) {
        error_setg(errp,
                   "%d interrupt lines unsupported: not divisible by 32",
                   s->num_irq);
        return;
    }

    if (s->lpi_enable && !s->dma) {
        error_setg(errp, "Redist-ITS: Guest 'sysmem' reference link not set");
        return;
    }

    rdist_capacity = 0;
    for (i = 0; i < s->nb_redist_regions; i++) {
        rdist_capacity += s->redist_region_count[i];
    }
    if (rdist_capacity != s->num_cpu) {
        error_setg(errp, "Capacity of the redist regions(%d) "
                   "does not match the number of vcpus(%d)",
                   rdist_capacity, s->num_cpu);
        return;
    }

    if (s->lpi_enable) {
        address_space_init(&s->dma_as, s->dma, "gicv3-its-sysmem");
    }

    s->cpu = g_new0(GICv3CPUState, s->num_cpu);

    for (i = 0; i < s->num_cpu; i++) {
        CPUState *cpu = qemu_get_cpu(i);
        uint64_t cpu_affid;

        s->cpu[i].cpu = cpu;
        s->cpu[i].gic = s;
        /* Store GICv3CPUState in CPUARMState gicv3state pointer */
        gicv3_set_gicv3state(cpu, &s->cpu[i]);

        /* Pre-construct the GICR_TYPER:
         * For our implementation:
         *  Top 32 bits are the affinity value of the associated CPU
         *  CommonLPIAff == 01 (redistributors with same Aff3 share LPI table)
         *  Processor_Number == CPU index starting from 0
         *  DPGS == 0 (GICR_CTLR.DPG* not supported)
         *  Last == 1 if this is the last redistributor in a series of
         *            contiguous redistributor pages
         *  DirectLPI == 0 (direct injection of LPIs not supported)
         *  VLPIS == 0 (virtual LPIs not supported)
         *  PLPIS == 0 (physical LPIs not supported)
         */
        cpu_affid = object_property_get_uint(OBJECT(cpu), "mp-affinity", NULL);

        /* The CPU mp-affinity property is in MPIDR register format; squash
         * the affinity bytes into 32 bits as the GICR_TYPER has them.
         */
        cpu_affid = ((cpu_affid & 0xFF00000000ULL) >> 8) |
                    (cpu_affid & 0xFFFFFF);
        s->cpu[i].gicr_typer = (cpu_affid << 32) |
                               (1 << 24) |
                               (i << 8);

        if (s->lpi_enable) {
            s->cpu[i].gicr_typer |= GICR_TYPER_PLPIS;
        }
    }

    /*
     * Now go through and set GICR_TYPER.Last for the final
     * redistributor in each region.
     */
    cpuidx = 0;
    for (i = 0; i < s->nb_redist_regions; i++) {
        cpuidx += s->redist_region_count[i];
        s->cpu[cpuidx - 1].gicr_typer |= GICR_TYPER_LAST;
    }

    s->itslist = g_ptr_array_new();
}

static void arm_gicv3_finalize(Object *obj)
{
    GICv3State *s = ARM_GICV3_COMMON(obj);

    g_free(s->redist_region_count);
}

static void arm_gicv3_common_reset(DeviceState *dev)
{
    GICv3State *s = ARM_GICV3_COMMON(dev);
    int i;

    for (i = 0; i < s->num_cpu; i++) {
        GICv3CPUState *cs = &s->cpu[i];

        cs->level = 0;
        cs->gicr_ctlr = 0;
        if (s->lpi_enable) {
            /* Our implementation supports clearing GICR_CTLR.EnableLPIs */
            cs->gicr_ctlr |= GICR_CTLR_CES;
        }
        cs->gicr_statusr[GICV3_S] = 0;
        cs->gicr_statusr[GICV3_NS] = 0;
        cs->gicr_waker = GICR_WAKER_ProcessorSleep | GICR_WAKER_ChildrenAsleep;
        cs->gicr_propbaser = 0;
        cs->gicr_pendbaser = 0;
        /* If we're resetting a TZ-aware GIC as if secure firmware
         * had set it up ready to start a kernel in non-secure, we
         * need to set interrupts to group 1 so the kernel can use them.
         * Otherwise they reset to group 0 like the hardware.
         */
        if (s->irq_reset_nonsecure) {
            cs->gicr_igroupr0 = 0xffffffff;
        } else {
            cs->gicr_igroupr0 = 0;
        }

        cs->gicr_ienabler0 = 0;
        cs->gicr_ipendr0 = 0;
        cs->gicr_iactiver0 = 0;
        cs->edge_trigger = 0xffff;
        cs->gicr_igrpmodr0 = 0;
        cs->gicr_nsacr = 0;
        memset(cs->gicr_ipriorityr, 0, sizeof(cs->gicr_ipriorityr));

        cs->hppi.prio = 0xff;
        cs->hpplpi.prio = 0xff;

        /* State in the CPU interface must *not* be reset here, because it
         * is part of the CPU's reset domain, not the GIC device's.
         */
    }

    /* For our implementation affinity routing is always enabled */
    if (s->security_extn) {
        s->gicd_ctlr = GICD_CTLR_ARE_S | GICD_CTLR_ARE_NS;
    } else {
        s->gicd_ctlr = GICD_CTLR_DS | GICD_CTLR_ARE;
    }

    s->gicd_statusr[GICV3_S] = 0;
    s->gicd_statusr[GICV3_NS] = 0;

    memset(s->group, 0, sizeof(s->group));
    memset(s->grpmod, 0, sizeof(s->grpmod));
    memset(s->enabled, 0, sizeof(s->enabled));
    memset(s->pending, 0, sizeof(s->pending));
    memset(s->active, 0, sizeof(s->active));
    memset(s->level, 0, sizeof(s->level));
    memset(s->edge_trigger, 0, sizeof(s->edge_trigger));
    memset(s->gicd_ipriority, 0, sizeof(s->gicd_ipriority));
    memset(s->gicd_irouter, 0, sizeof(s->gicd_irouter));
    memset(s->gicd_nsacr, 0, sizeof(s->gicd_nsacr));
    /* GICD_IROUTER are UNKNOWN at reset so in theory the guest must
     * write these to get sane behaviour and we need not populate the
     * pointer cache here; however having the cache be different for
     * "happened to be 0 from reset" and "guest wrote 0" would be
     * too confusing.
     */
    gicv3_cache_all_target_cpustates(s);

    if (s->irq_reset_nonsecure) {
        /* If we're resetting a TZ-aware GIC as if secure firmware
         * had set it up ready to start a kernel in non-secure, we
         * need to set interrupts to group 1 so the kernel can use them.
         * Otherwise they reset to group 0 like the hardware.
         */
        for (i = GIC_INTERNAL; i < s->num_irq; i++) {
            gicv3_gicd_group_set(s, i);
        }
    }
    s->gicd_no_migration_shift_bug = true;
}

static void arm_gic_common_linux_init(ARMLinuxBootIf *obj,
                                      bool secure_boot)
{
    GICv3State *s = ARM_GICV3_COMMON(obj);

    if (s->security_extn && !secure_boot) {
        /* We're directly booting a kernel into NonSecure. If this GIC
         * implements the security extensions then we must configure it
         * to have all the interrupts be NonSecure (this is a job that
         * is done by the Secure boot firmware in real hardware, and in
         * this mode QEMU is acting as a minimalist firmware-and-bootloader
         * equivalent).
         */
        s->irq_reset_nonsecure = true;
    }
}

static Property arm_gicv3_common_properties[] = {
    DEFINE_PROP_UINT32("num-cpu", GICv3State, num_cpu, 1),
    DEFINE_PROP_UINT32("num-irq", GICv3State, num_irq, 32),
    DEFINE_PROP_UINT32("revision", GICv3State, revision, 3),
    DEFINE_PROP_BOOL("has-lpi", GICv3State, lpi_enable, 0),
    DEFINE_PROP_BOOL("has-security-extensions", GICv3State, security_extn, 0),
    DEFINE_PROP_ARRAY("redist-region-count", GICv3State, nb_redist_regions,
                      redist_region_count, qdev_prop_uint32, uint32_t),
    DEFINE_PROP_LINK("sysmem", GICv3State, dma, TYPE_MEMORY_REGION,
                     MemoryRegion *),
    DEFINE_PROP_END_OF_LIST(),
};

static void arm_gicv3_common_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    ARMLinuxBootIfClass *albifc = ARM_LINUX_BOOT_IF_CLASS(klass);

    dc->reset = arm_gicv3_common_reset;
    dc->realize = arm_gicv3_common_realize;
    device_class_set_props(dc, arm_gicv3_common_properties);
    dc->vmsd = &vmstate_gicv3;
    albifc->arm_linux_init = arm_gic_common_linux_init;
}

static const TypeInfo arm_gicv3_common_type = {
    .name = TYPE_ARM_GICV3_COMMON,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(GICv3State),
    .class_size = sizeof(ARMGICv3CommonClass),
    .class_init = arm_gicv3_common_class_init,
    .instance_finalize = arm_gicv3_finalize,
    .abstract = true,
    .interfaces = (InterfaceInfo []) {
        { TYPE_ARM_LINUX_BOOT_IF },
        { },
    },
};

static void register_types(void)
{
    type_register_static(&arm_gicv3_common_type);
}

type_init(register_types)