/* (Blame-export artifacts removed; file content begins with the license header below.) */
1 | /* |
2 | * ARM GICv3 support - common bits of emulated and KVM kernel model | |
3 | * | |
4 | * Copyright (c) 2012 Linaro Limited | |
5 | * Copyright (c) 2015 Huawei. | |
07e2034d | 6 | * Copyright (c) 2015 Samsung Electronics Co., Ltd. |
ff8f06ee | 7 | * Written by Peter Maydell |
07e2034d | 8 | * Reworked for GICv3 by Shlomo Pongratz and Pavel Fedin |
ff8f06ee SP |
9 | * |
10 | * This program is free software; you can redistribute it and/or modify | |
11 | * it under the terms of the GNU General Public License as published by | |
12 | * the Free Software Foundation, either version 2 of the License, or | |
13 | * (at your option) any later version. | |
14 | * | |
15 | * This program is distributed in the hope that it will be useful, | |
16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
18 | * GNU General Public License for more details. | |
19 | * | |
20 | * You should have received a copy of the GNU General Public License along | |
21 | * with this program; if not, see <http://www.gnu.org/licenses/>. | |
22 | */ | |
23 | ||
8ef94f0b | 24 | #include "qemu/osdep.h" |
da34e65c | 25 | #include "qapi/error.h" |
0b8fa32f | 26 | #include "qemu/module.h" |
07e2034d | 27 | #include "qom/cpu.h" |
ff8f06ee | 28 | #include "hw/intc/arm_gicv3_common.h" |
d6454270 | 29 | #include "migration/vmstate.h" |
07e2034d PF |
30 | #include "gicv3_internal.h" |
31 | #include "hw/arm/linux-boot-if.h" | |
910e2048 | 32 | #include "sysemu/kvm.h" |
ff8f06ee | 33 | |
341823c1 PM |
34 | |
35 | static void gicv3_gicd_no_migration_shift_bug_post_load(GICv3State *cs) | |
36 | { | |
37 | if (cs->gicd_no_migration_shift_bug) { | |
38 | return; | |
39 | } | |
40 | ||
41 | /* Older versions of QEMU had a bug in the handling of state save/restore | |
42 | * to the KVM GICv3: they got the offset in the bitmap arrays wrong, | |
43 | * so that instead of the data for external interrupts 32 and up | |
44 | * starting at bit position 32 in the bitmap, it started at bit | |
45 | * position 64. If we're receiving data from a QEMU with that bug, | |
46 | * we must move the data down into the right place. | |
47 | */ | |
48 | memmove(cs->group, (uint8_t *)cs->group + GIC_INTERNAL / 8, | |
49 | sizeof(cs->group) - GIC_INTERNAL / 8); | |
50 | memmove(cs->grpmod, (uint8_t *)cs->grpmod + GIC_INTERNAL / 8, | |
51 | sizeof(cs->grpmod) - GIC_INTERNAL / 8); | |
52 | memmove(cs->enabled, (uint8_t *)cs->enabled + GIC_INTERNAL / 8, | |
53 | sizeof(cs->enabled) - GIC_INTERNAL / 8); | |
54 | memmove(cs->pending, (uint8_t *)cs->pending + GIC_INTERNAL / 8, | |
55 | sizeof(cs->pending) - GIC_INTERNAL / 8); | |
56 | memmove(cs->active, (uint8_t *)cs->active + GIC_INTERNAL / 8, | |
57 | sizeof(cs->active) - GIC_INTERNAL / 8); | |
58 | memmove(cs->edge_trigger, (uint8_t *)cs->edge_trigger + GIC_INTERNAL / 8, | |
59 | sizeof(cs->edge_trigger) - GIC_INTERNAL / 8); | |
60 | ||
61 | /* | |
62 | * While this new version QEMU doesn't have this kind of bug as we fix it, | |
63 | * so it needs to set the flag to true to indicate that and it's necessary | |
64 | * for next migration to work from this new version QEMU. | |
65 | */ | |
66 | cs->gicd_no_migration_shift_bug = true; | |
67 | } | |
68 | ||
44b1ff31 | 69 | static int gicv3_pre_save(void *opaque) |
ff8f06ee SP |
70 | { |
71 | GICv3State *s = (GICv3State *)opaque; | |
72 | ARMGICv3CommonClass *c = ARM_GICV3_COMMON_GET_CLASS(s); | |
73 | ||
74 | if (c->pre_save) { | |
75 | c->pre_save(s); | |
76 | } | |
44b1ff31 DDAG |
77 | |
78 | return 0; | |
ff8f06ee SP |
79 | } |
80 | ||
81 | static int gicv3_post_load(void *opaque, int version_id) | |
82 | { | |
83 | GICv3State *s = (GICv3State *)opaque; | |
84 | ARMGICv3CommonClass *c = ARM_GICV3_COMMON_GET_CLASS(s); | |
85 | ||
341823c1 PM |
86 | gicv3_gicd_no_migration_shift_bug_post_load(s); |
87 | ||
ff8f06ee SP |
88 | if (c->post_load) { |
89 | c->post_load(s); | |
90 | } | |
91 | return 0; | |
92 | } | |
93 | ||
4eb833b5 PM |
94 | static bool virt_state_needed(void *opaque) |
95 | { | |
96 | GICv3CPUState *cs = opaque; | |
97 | ||
98 | return cs->num_list_regs != 0; | |
99 | } | |
100 | ||
/* Subsection carrying the virtualization (ICH_*) state of one CPU
 * interface; only transferred when the CPU has list registers
 * (see virt_state_needed).
 */
static const VMStateDescription vmstate_gicv3_cpu_virt = {
    .name = "arm_gicv3_cpu/virt",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = virt_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_2DARRAY(ich_apr, GICv3CPUState, 3, 4),
        VMSTATE_UINT64(ich_hcr_el2, GICv3CPUState),
        VMSTATE_UINT64_ARRAY(ich_lr_el2, GICv3CPUState, GICV3_LR_MAX),
        VMSTATE_UINT64(ich_vmcr_el2, GICv3CPUState),
        VMSTATE_END_OF_LIST()
    }
};
114 | ||
326049cc | 115 | static int vmstate_gicv3_cpu_pre_load(void *opaque) |
6692aac4 VK |
116 | { |
117 | GICv3CPUState *cs = opaque; | |
118 | ||
119 | /* | |
120 | * If the sre_el1 subsection is not transferred this | |
121 | * means SRE_EL1 is 0x7 (which might not be the same as | |
122 | * our reset value). | |
123 | */ | |
124 | cs->icc_sre_el1 = 0x7; | |
125 | return 0; | |
126 | } | |
127 | ||
128 | static bool icc_sre_el1_reg_needed(void *opaque) | |
129 | { | |
130 | GICv3CPUState *cs = opaque; | |
131 | ||
132 | return cs->icc_sre_el1 != 7; | |
133 | } | |
134 | ||
/* Subsection for ICC_SRE_EL1; skipped when the register holds the
 * implied default 0x7 (see icc_sre_el1_reg_needed), which keeps the
 * stream compatible with destinations that lack this subsection.
 */
const VMStateDescription vmstate_gicv3_cpu_sre_el1 = {
    .name = "arm_gicv3_cpu/sre_el1",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = icc_sre_el1_reg_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(icc_sre_el1, GICv3CPUState),
        VMSTATE_END_OF_LIST()
    }
};
145 | ||
757caeed PF |
/* Migration state of one CPU interface / redistributor pair:
 * GICR_* redistributor registers plus the ICC_* CPU interface system
 * registers. EL2 (virtualization) state and ICC_SRE_EL1 travel in the
 * optional subsections listed at the end.
 */
static const VMStateDescription vmstate_gicv3_cpu = {
    .name = "arm_gicv3_cpu",
    .version_id = 1,
    .minimum_version_id = 1,
    /* Establish defaults for state the stream is allowed to omit */
    .pre_load = vmstate_gicv3_cpu_pre_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(level, GICv3CPUState),
        VMSTATE_UINT32(gicr_ctlr, GICv3CPUState),
        VMSTATE_UINT32_ARRAY(gicr_statusr, GICv3CPUState, 2),
        VMSTATE_UINT32(gicr_waker, GICv3CPUState),
        VMSTATE_UINT64(gicr_propbaser, GICv3CPUState),
        VMSTATE_UINT64(gicr_pendbaser, GICv3CPUState),
        VMSTATE_UINT32(gicr_igroupr0, GICv3CPUState),
        VMSTATE_UINT32(gicr_ienabler0, GICv3CPUState),
        VMSTATE_UINT32(gicr_ipendr0, GICv3CPUState),
        VMSTATE_UINT32(gicr_iactiver0, GICv3CPUState),
        VMSTATE_UINT32(edge_trigger, GICv3CPUState),
        VMSTATE_UINT32(gicr_igrpmodr0, GICv3CPUState),
        VMSTATE_UINT32(gicr_nsacr, GICv3CPUState),
        VMSTATE_UINT8_ARRAY(gicr_ipriorityr, GICv3CPUState, GIC_INTERNAL),
        VMSTATE_UINT64_ARRAY(icc_ctlr_el1, GICv3CPUState, 2),
        VMSTATE_UINT64(icc_pmr_el1, GICv3CPUState),
        VMSTATE_UINT64_ARRAY(icc_bpr, GICv3CPUState, 3),
        VMSTATE_UINT64_2DARRAY(icc_apr, GICv3CPUState, 3, 4),
        VMSTATE_UINT64_ARRAY(icc_igrpen, GICv3CPUState, 3),
        VMSTATE_UINT64(icc_ctlr_el3, GICv3CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * []) {
        &vmstate_gicv3_cpu_virt,
        &vmstate_gicv3_cpu_sre_el1,
        NULL
    }
};
180 | ||
326049cc | 181 | static int gicv3_pre_load(void *opaque) |
910e2048 SZ |
182 | { |
183 | GICv3State *cs = opaque; | |
184 | ||
185 | /* | |
186 | * The gicd_no_migration_shift_bug flag is used for migration compatibility | |
187 | * for old version QEMU which may have the GICD bmp shift bug under KVM mode. | |
188 | * Strictly, what we want to know is whether the migration source is using | |
189 | * KVM. Since we don't have any way to determine that, we look at whether the | |
190 | * destination is using KVM; this is close enough because for the older QEMU | |
191 | * versions with this bug KVM -> TCG migration didn't work anyway. If the | |
192 | * source is a newer QEMU without this bug it will transmit the migration | |
193 | * subsection which sets the flag to true; otherwise it will remain set to | |
194 | * the value we select here. | |
195 | */ | |
196 | if (kvm_enabled()) { | |
197 | cs->gicd_no_migration_shift_bug = false; | |
198 | } | |
199 | ||
200 | return 0; | |
201 | } | |
202 | ||
78e9ddd7 PM |
/* Subsection .needed callback that unconditionally requests transfer. */
static bool needed_always(void *opaque)
{
    (void)opaque;

    return true;
}
207 | ||
910e2048 SZ |
208 | const VMStateDescription vmstate_gicv3_gicd_no_migration_shift_bug = { |
209 | .name = "arm_gicv3/gicd_no_migration_shift_bug", | |
210 | .version_id = 1, | |
211 | .minimum_version_id = 1, | |
78e9ddd7 | 212 | .needed = needed_always, |
910e2048 SZ |
213 | .fields = (VMStateField[]) { |
214 | VMSTATE_BOOL(gicd_no_migration_shift_bug, GICv3State), | |
215 | VMSTATE_END_OF_LIST() | |
216 | } | |
217 | }; | |
218 | ||
ff8f06ee SP |
219 | static const VMStateDescription vmstate_gicv3 = { |
220 | .name = "arm_gicv3", | |
757caeed PF |
221 | .version_id = 1, |
222 | .minimum_version_id = 1, | |
326049cc | 223 | .pre_load = gicv3_pre_load, |
ff8f06ee SP |
224 | .pre_save = gicv3_pre_save, |
225 | .post_load = gicv3_post_load, | |
252a7a6a | 226 | .priority = MIG_PRI_GICV3, |
757caeed PF |
227 | .fields = (VMStateField[]) { |
228 | VMSTATE_UINT32(gicd_ctlr, GICv3State), | |
229 | VMSTATE_UINT32_ARRAY(gicd_statusr, GICv3State, 2), | |
230 | VMSTATE_UINT32_ARRAY(group, GICv3State, GICV3_BMP_SIZE), | |
231 | VMSTATE_UINT32_ARRAY(grpmod, GICv3State, GICV3_BMP_SIZE), | |
232 | VMSTATE_UINT32_ARRAY(enabled, GICv3State, GICV3_BMP_SIZE), | |
233 | VMSTATE_UINT32_ARRAY(pending, GICv3State, GICV3_BMP_SIZE), | |
234 | VMSTATE_UINT32_ARRAY(active, GICv3State, GICV3_BMP_SIZE), | |
235 | VMSTATE_UINT32_ARRAY(level, GICv3State, GICV3_BMP_SIZE), | |
236 | VMSTATE_UINT32_ARRAY(edge_trigger, GICv3State, GICV3_BMP_SIZE), | |
237 | VMSTATE_UINT8_ARRAY(gicd_ipriority, GICv3State, GICV3_MAXIRQ), | |
238 | VMSTATE_UINT64_ARRAY(gicd_irouter, GICv3State, GICV3_MAXIRQ), | |
239 | VMSTATE_UINT32_ARRAY(gicd_nsacr, GICv3State, | |
240 | DIV_ROUND_UP(GICV3_MAXIRQ, 16)), | |
241 | VMSTATE_STRUCT_VARRAY_POINTER_UINT32(cpu, GICv3State, num_cpu, | |
242 | vmstate_gicv3_cpu, GICv3CPUState), | |
243 | VMSTATE_END_OF_LIST() | |
910e2048 SZ |
244 | }, |
245 | .subsections = (const VMStateDescription * []) { | |
246 | &vmstate_gicv3_gicd_no_migration_shift_bug, | |
247 | NULL | |
757caeed | 248 | } |
ff8f06ee SP |
249 | }; |
250 | ||
/* Create the input GPIO lines, output IRQ/FIQ lines and MMIO regions
 * for a GICv3 device.
 *
 * @s:       the GIC whose IRQs and MMIO regions to initialize
 * @handler: callback invoked when an input GPIO line changes level
 * @ops:     MMIO ops array; ops[0] is used for the distributor region
 *           and ops[1] for the redistributor regions (ops may be NULL)
 * @errp:    set if the configured redistributor regions cannot hold one
 *           redistributor per vcpu
 *
 * Output sysbus IRQs are registered in a fixed order -- all parent_irq
 * lines first, then all parent_fiq, parent_virq and parent_vfiq lines
 * (one per CPU in each group) -- so board code can rely on the indices.
 */
void gicv3_init_irqs_and_mmio(GICv3State *s, qemu_irq_handler handler,
                              const MemoryRegionOps *ops, Error **errp)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(s);
    int rdist_capacity = 0;
    int i;

    /* Sum the capacity of all configured redistributor regions and
     * check it can accommodate every vcpu.
     */
    for (i = 0; i < s->nb_redist_regions; i++) {
        rdist_capacity += s->redist_region_count[i];
    }
    if (rdist_capacity < s->num_cpu) {
        error_setg(errp, "Capacity of the redist regions(%d) "
                   "is less than number of vcpus(%d)",
                   rdist_capacity, s->num_cpu);
        return;
    }

    /* For the GIC, also expose incoming GPIO lines for PPIs for each CPU.
     * GPIO array layout is thus:
     *  [0..N-1] spi
     *  [N..N+31] PPIs for CPU 0
     *  [N+32..N+63] PPIs for CPU 1
     *   ...
     */
    i = s->num_irq - GIC_INTERNAL + GIC_INTERNAL * s->num_cpu;
    qdev_init_gpio_in(DEVICE(s), handler, i);

    for (i = 0; i < s->num_cpu; i++) {
        sysbus_init_irq(sbd, &s->cpu[i].parent_irq);
    }
    for (i = 0; i < s->num_cpu; i++) {
        sysbus_init_irq(sbd, &s->cpu[i].parent_fiq);
    }
    for (i = 0; i < s->num_cpu; i++) {
        sysbus_init_irq(sbd, &s->cpu[i].parent_virq);
    }
    for (i = 0; i < s->num_cpu; i++) {
        sysbus_init_irq(sbd, &s->cpu[i].parent_vfiq);
    }

    /* MMIO region 0: the 64KB distributor frame */
    memory_region_init_io(&s->iomem_dist, OBJECT(s), ops, s,
                          "gicv3_dist", 0x10000);
    sysbus_init_mmio(sbd, &s->iomem_dist);

    /* MMIO regions 1..nb_redist_regions: one region per configured
     * block of redistributors, sized by its redistributor count.
     */
    s->iomem_redist = g_new0(MemoryRegion, s->nb_redist_regions);
    for (i = 0; i < s->nb_redist_regions; i++) {
        char *name = g_strdup_printf("gicv3_redist_region[%d]", i);

        memory_region_init_io(&s->iomem_redist[i], OBJECT(s),
                              ops ? &ops[1] : NULL, s, name,
                              s->redist_region_count[i] * GICV3_REDIST_SIZE);
        sysbus_init_mmio(sbd, &s->iomem_redist[i]);
        g_free(name);
    }
}
306 | ||
307 | static void arm_gicv3_common_realize(DeviceState *dev, Error **errp) | |
308 | { | |
309 | GICv3State *s = ARM_GICV3_COMMON(dev); | |
07e2034d | 310 | int i; |
ff8f06ee SP |
311 | |
312 | /* revision property is actually reserved and currently used only in order | |
313 | * to keep the interface compatible with GICv2 code, avoiding extra | |
314 | * conditions. However, in future it could be used, for example, if we | |
315 | * implement GICv4. | |
316 | */ | |
317 | if (s->revision != 3) { | |
318 | error_setg(errp, "unsupported GIC revision %d", s->revision); | |
319 | return; | |
320 | } | |
07e2034d PF |
321 | |
322 | if (s->num_irq > GICV3_MAXIRQ) { | |
323 | error_setg(errp, | |
324 | "requested %u interrupt lines exceeds GIC maximum %d", | |
325 | s->num_irq, GICV3_MAXIRQ); | |
326 | return; | |
327 | } | |
328 | if (s->num_irq < GIC_INTERNAL) { | |
329 | error_setg(errp, | |
330 | "requested %u interrupt lines is below GIC minimum %d", | |
331 | s->num_irq, GIC_INTERNAL); | |
332 | return; | |
333 | } | |
334 | ||
335 | /* ITLinesNumber is represented as (N / 32) - 1, so this is an | |
336 | * implementation imposed restriction, not an architectural one, | |
337 | * so we don't have to deal with bitfields where only some of the | |
338 | * bits in a 32-bit word should be valid. | |
339 | */ | |
340 | if (s->num_irq % 32) { | |
341 | error_setg(errp, | |
342 | "%d interrupt lines unsupported: not divisible by 32", | |
343 | s->num_irq); | |
344 | return; | |
345 | } | |
346 | ||
347 | s->cpu = g_new0(GICv3CPUState, s->num_cpu); | |
348 | ||
349 | for (i = 0; i < s->num_cpu; i++) { | |
350 | CPUState *cpu = qemu_get_cpu(i); | |
351 | uint64_t cpu_affid; | |
352 | int last; | |
353 | ||
354 | s->cpu[i].cpu = cpu; | |
355 | s->cpu[i].gic = s; | |
d3a3e529 VK |
356 | /* Store GICv3CPUState in CPUARMState gicv3state pointer */ |
357 | gicv3_set_gicv3state(cpu, &s->cpu[i]); | |
07e2034d PF |
358 | |
359 | /* Pre-construct the GICR_TYPER: | |
360 | * For our implementation: | |
361 | * Top 32 bits are the affinity value of the associated CPU | |
362 | * CommonLPIAff == 01 (redistributors with same Aff3 share LPI table) | |
363 | * Processor_Number == CPU index starting from 0 | |
364 | * DPGS == 0 (GICR_CTLR.DPG* not supported) | |
365 | * Last == 1 if this is the last redistributor in a series of | |
366 | * contiguous redistributor pages | |
367 | * DirectLPI == 0 (direct injection of LPIs not supported) | |
368 | * VLPIS == 0 (virtual LPIs not supported) | |
369 | * PLPIS == 0 (physical LPIs not supported) | |
370 | */ | |
77a7a367 | 371 | cpu_affid = object_property_get_uint(OBJECT(cpu), "mp-affinity", NULL); |
07e2034d PF |
372 | last = (i == s->num_cpu - 1); |
373 | ||
374 | /* The CPU mp-affinity property is in MPIDR register format; squash | |
375 | * the affinity bytes into 32 bits as the GICR_TYPER has them. | |
376 | */ | |
92204403 AJ |
377 | cpu_affid = ((cpu_affid & 0xFF00000000ULL) >> 8) | |
378 | (cpu_affid & 0xFFFFFF); | |
07e2034d PF |
379 | s->cpu[i].gicr_typer = (cpu_affid << 32) | |
380 | (1 << 24) | | |
381 | (i << 8) | | |
382 | (last << 4); | |
383 | } | |
ff8f06ee SP |
384 | } |
385 | ||
1e575b66 EA |
386 | static void arm_gicv3_finalize(Object *obj) |
387 | { | |
388 | GICv3State *s = ARM_GICV3_COMMON(obj); | |
389 | ||
390 | g_free(s->redist_region_count); | |
391 | } | |
392 | ||
ff8f06ee SP |
/* Reset the distributor and all redistributor (GICR_*) state to the
 * architectural reset values; CPU interface (ICC_*) state is untouched
 * because it belongs to the CPU's reset domain.
 */
static void arm_gicv3_common_reset(DeviceState *dev)
{
    GICv3State *s = ARM_GICV3_COMMON(dev);
    int i;

    /* Per-CPU redistributor state */
    for (i = 0; i < s->num_cpu; i++) {
        GICv3CPUState *cs = &s->cpu[i];

        cs->level = 0;
        cs->gicr_ctlr = 0;
        cs->gicr_statusr[GICV3_S] = 0;
        cs->gicr_statusr[GICV3_NS] = 0;
        /* Redistributor comes up asleep until woken by the CPU */
        cs->gicr_waker = GICR_WAKER_ProcessorSleep | GICR_WAKER_ChildrenAsleep;
        cs->gicr_propbaser = 0;
        cs->gicr_pendbaser = 0;
        /* If we're resetting a TZ-aware GIC as if secure firmware
         * had set it up ready to start a kernel in non-secure, we
         * need to set interrupts to group 1 so the kernel can use them.
         * Otherwise they reset to group 0 like the hardware.
         */
        if (s->irq_reset_nonsecure) {
            cs->gicr_igroupr0 = 0xffffffff;
        } else {
            cs->gicr_igroupr0 = 0;
        }

        cs->gicr_ienabler0 = 0;
        cs->gicr_ipendr0 = 0;
        cs->gicr_iactiver0 = 0;
        /* SGIs (bits 0-15) are edge-triggered at reset */
        cs->edge_trigger = 0xffff;
        cs->gicr_igrpmodr0 = 0;
        cs->gicr_nsacr = 0;
        memset(cs->gicr_ipriorityr, 0, sizeof(cs->gicr_ipriorityr));

        /* 0xff == idle priority: no highest-priority pending interrupt */
        cs->hppi.prio = 0xff;

        /* State in the CPU interface must *not* be reset here, because it
         * is part of the CPU's reset domain, not the GIC device's.
         */
    }

    /* For our implementation affinity routing is always enabled */
    if (s->security_extn) {
        s->gicd_ctlr = GICD_CTLR_ARE_S | GICD_CTLR_ARE_NS;
    } else {
        s->gicd_ctlr = GICD_CTLR_DS | GICD_CTLR_ARE;
    }

    s->gicd_statusr[GICV3_S] = 0;
    s->gicd_statusr[GICV3_NS] = 0;

    /* Clear all per-interrupt distributor bitmaps and tables */
    memset(s->group, 0, sizeof(s->group));
    memset(s->grpmod, 0, sizeof(s->grpmod));
    memset(s->enabled, 0, sizeof(s->enabled));
    memset(s->pending, 0, sizeof(s->pending));
    memset(s->active, 0, sizeof(s->active));
    memset(s->level, 0, sizeof(s->level));
    memset(s->edge_trigger, 0, sizeof(s->edge_trigger));
    memset(s->gicd_ipriority, 0, sizeof(s->gicd_ipriority));
    memset(s->gicd_irouter, 0, sizeof(s->gicd_irouter));
    memset(s->gicd_nsacr, 0, sizeof(s->gicd_nsacr));
    /* GICD_IROUTER are UNKNOWN at reset so in theory the guest must
     * write these to get sane behaviour and we need not populate the
     * pointer cache here; however having the cache be different for
     * "happened to be 0 from reset" and "guest wrote 0" would be
     * too confusing.
     */
    gicv3_cache_all_target_cpustates(s);

    if (s->irq_reset_nonsecure) {
        /* If we're resetting a TZ-aware GIC as if secure firmware
         * had set it up ready to start a kernel in non-secure, we
         * need to set interrupts to group 1 so the kernel can use them.
         * Otherwise they reset to group 0 like the hardware.
         */
        for (i = GIC_INTERNAL; i < s->num_irq; i++) {
            gicv3_gicd_group_set(s, i);
        }
    }
    /* Freshly-reset state is in the fixed bitmap layout; only an incoming
     * migration from a buggy QEMU may clear this (see gicv3_pre_load).
     */
    s->gicd_no_migration_shift_bug = true;
}
474 | ||
475 | static void arm_gic_common_linux_init(ARMLinuxBootIf *obj, | |
476 | bool secure_boot) | |
477 | { | |
478 | GICv3State *s = ARM_GICV3_COMMON(obj); | |
479 | ||
480 | if (s->security_extn && !secure_boot) { | |
481 | /* We're directly booting a kernel into NonSecure. If this GIC | |
482 | * implements the security extensions then we must configure it | |
483 | * to have all the interrupts be NonSecure (this is a job that | |
484 | * is done by the Secure boot firmware in real hardware, and in | |
485 | * this mode QEMU is acting as a minimalist firmware-and-bootloader | |
486 | * equivalent). | |
487 | */ | |
488 | s->irq_reset_nonsecure = true; | |
489 | } | |
ff8f06ee SP |
490 | } |
491 | ||
/* User-settable properties common to every GICv3 implementation. */
static Property arm_gicv3_common_properties[] = {
    DEFINE_PROP_UINT32("num-cpu", GICv3State, num_cpu, 1),
    DEFINE_PROP_UINT32("num-irq", GICv3State, num_irq, 32),
    /* "revision" is reserved; realize only accepts the value 3 */
    DEFINE_PROP_UINT32("revision", GICv3State, revision, 3),
    DEFINE_PROP_BOOL("has-security-extensions", GICv3State, security_extn, 0),
    /* Number of redistributors in each contiguous MMIO region */
    DEFINE_PROP_ARRAY("redist-region-count", GICv3State, nb_redist_regions,
                      redist_region_count, qdev_prop_uint32, uint32_t),
    DEFINE_PROP_END_OF_LIST(),
};
501 | ||
502 | static void arm_gicv3_common_class_init(ObjectClass *klass, void *data) | |
503 | { | |
504 | DeviceClass *dc = DEVICE_CLASS(klass); | |
07e2034d | 505 | ARMLinuxBootIfClass *albifc = ARM_LINUX_BOOT_IF_CLASS(klass); |
ff8f06ee SP |
506 | |
507 | dc->reset = arm_gicv3_common_reset; | |
508 | dc->realize = arm_gicv3_common_realize; | |
509 | dc->props = arm_gicv3_common_properties; | |
510 | dc->vmsd = &vmstate_gicv3; | |
07e2034d | 511 | albifc->arm_linux_init = arm_gic_common_linux_init; |
ff8f06ee SP |
512 | } |
513 | ||
/* Abstract QOM base type; concrete subclasses supply the emulated or
 * KVM in-kernel GICv3 model (see the file header).
 */
static const TypeInfo arm_gicv3_common_type = {
    .name = TYPE_ARM_GICV3_COMMON,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(GICv3State),
    .class_size = sizeof(ARMGICv3CommonClass),
    .class_init = arm_gicv3_common_class_init,
    .instance_finalize = arm_gicv3_finalize,
    .abstract = true,
    /* Lets the boot code tell the GIC about non-secure direct boot */
    .interfaces = (InterfaceInfo []) {
        { TYPE_ARM_LINUX_BOOT_IF },
        { },
    },
};
527 | ||
528 | static void register_types(void) | |
529 | { | |
530 | type_register_static(&arm_gicv3_common_type); | |
531 | } | |
532 | ||
533 | type_init(register_types) |