/* hw/riscv/virt-acpi-build.c */
1 /*
2 * Support for generating ACPI tables and passing them to Guests
3 *
4 * RISC-V virt ACPI generation
5 *
6 * Copyright (C) 2008-2010 Kevin O'Connor <kevin@koconnor.net>
7 * Copyright (C) 2006 Fabrice Bellard
8 * Copyright (C) 2013 Red Hat Inc
9 * Copyright (c) 2015 HUAWEI TECHNOLOGIES CO.,LTD.
10 * Copyright (C) 2021-2023 Ventana Micro Systems Inc
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21
22 * You should have received a copy of the GNU General Public License along
23 * with this program; if not, see <http://www.gnu.org/licenses/>.
24 */
25
26 #include "qemu/osdep.h"
27 #include "hw/acpi/acpi-defs.h"
28 #include "hw/acpi/acpi.h"
29 #include "hw/acpi/aml-build.h"
30 #include "hw/acpi/pci.h"
31 #include "hw/acpi/utils.h"
32 #include "hw/intc/riscv_aclint.h"
33 #include "hw/nvram/fw_cfg_acpi.h"
34 #include "hw/pci-host/gpex.h"
35 #include "hw/riscv/virt.h"
36 #include "hw/riscv/numa.h"
37 #include "hw/virtio/virtio-acpi.h"
38 #include "migration/vmstate.h"
39 #include "qapi/error.h"
40 #include "qemu/error-report.h"
41 #include "sysemu/reset.h"
42
#define ACPI_BUILD_TABLE_SIZE             0x20000

/*
 * Unique interrupt-controller ID: socket index in the top byte,
 * per-socket controller index in the low 24 bits.  Both arguments are
 * parenthesized so that expression arguments (e.g. "a | b") expand
 * correctly; the original left 'socket' bare before the shift.
 */
#define ACPI_BUILD_INTC_ID(socket, index) ((((socket)) << 24) | (index))
45
/* Book-keeping needed to re-patch the ACPI blobs after a machine reset */
typedef struct AcpiBuildState {
    /* Copy of table in RAM (for patching) */
    MemoryRegion *table_mr;
    /* RSDP blob exposed to the guest */
    MemoryRegion *rsdp_mr;
    /* Linker/loader script blob consumed by the firmware */
    MemoryRegion *linker_mr;
    /* Is table patched? */
    bool patched;
} AcpiBuildState;
54
55 static void acpi_align_size(GArray *blob, unsigned align)
56 {
57 /*
58 * Align size to multiple of given size. This reduces the chance
59 * we need to change size in the future (breaking cross version migration).
60 */
61 g_array_set_size(blob, ROUND_UP(acpi_data_len(blob), align));
62 }
63
/*
 * Append one RINTC (RISC-V Interrupt Controller, MADT structure type
 * 0x18, 36 bytes) for the hart identified by @uid to @entry.
 *
 * @uid:      index into @arch_ids; also emitted as the ACPI Processor UID
 * @arch_ids: possible-CPU list of the machine
 * @entry:    byte array the structure is appended to (MADT body or a
 *            per-CPU _MAT buffer)
 * @s:        machine state providing the AIA configuration and memory map
 */
static void riscv_acpi_madt_add_rintc(uint32_t uid,
                                      const CPUArchIdList *arch_ids,
                                      GArray *entry,
                                      RISCVVirtState *s)
{
    uint8_t guest_index_bits = imsic_num_bits(s->aia_guests + 1);
    uint64_t hart_id = arch_ids->cpus[uid].arch_id;
    uint32_t imsic_size, local_cpu_id, socket_id;
    uint64_t imsic_socket_addr, imsic_addr;
    MachineState *ms = MACHINE(s);

    /* Position of this hart within its socket */
    socket_id = arch_ids->cpus[uid].props.node_id;
    local_cpu_id = (arch_ids->cpus[uid].arch_id -
                    riscv_socket_first_hartid(ms, socket_id)) %
                    riscv_socket_hart_count(ms, socket_id);
    /*
     * S-mode IMSIC page of this hart: sockets are spaced
     * VIRT_IMSIC_GROUP_MAX_SIZE apart, harts by their per-hart file size.
     */
    imsic_socket_addr = s->memmap[VIRT_IMSIC_S].base +
                        (socket_id * VIRT_IMSIC_GROUP_MAX_SIZE);
    imsic_size = IMSIC_HART_SIZE(guest_index_bits);
    imsic_addr = imsic_socket_addr + local_cpu_id * imsic_size;
    build_append_int_noprefix(entry, 0x18, 1);    /* Type */
    build_append_int_noprefix(entry, 36, 1);      /* Length */
    build_append_int_noprefix(entry, 1, 1);       /* Version */
    build_append_int_noprefix(entry, 0, 1);       /* Reserved */
    build_append_int_noprefix(entry, 0x1, 4);     /* Flags (enabled) */
    build_append_int_noprefix(entry, hart_id, 8); /* Hart ID */
    build_append_int_noprefix(entry, uid, 4);     /* ACPI Processor UID */
    /*
     * External Interrupt Controller ID: only meaningful in wired-IRQ
     * (APLIC-only) mode, where it links the hart to its APLIC IDC.
     */
    if (s->aia_type == VIRT_AIA_TYPE_APLIC) {
        build_append_int_noprefix(entry,
                                  ACPI_BUILD_INTC_ID(
                                      arch_ids->cpus[uid].props.node_id,
                                      local_cpu_id),
                                  4);
    } else {
        build_append_int_noprefix(entry, 0, 4);
    }

    /* IMSIC base/size are only valid when MSIs are delivered via IMSIC */
    if (s->aia_type == VIRT_AIA_TYPE_APLIC_IMSIC) {
        /* IMSIC Base address */
        build_append_int_noprefix(entry, imsic_addr, 8);
        /* IMSIC Size */
        build_append_int_noprefix(entry, imsic_size, 4);
    } else {
        build_append_int_noprefix(entry, 0, 8);
        build_append_int_noprefix(entry, 0, 4);
    }
}
111
112 static void acpi_dsdt_add_cpus(Aml *scope, RISCVVirtState *s)
113 {
114 MachineClass *mc = MACHINE_GET_CLASS(s);
115 MachineState *ms = MACHINE(s);
116 const CPUArchIdList *arch_ids = mc->possible_cpu_arch_ids(ms);
117
118 for (int i = 0; i < arch_ids->len; i++) {
119 Aml *dev;
120 GArray *madt_buf = g_array_new(0, 1, 1);
121
122 dev = aml_device("C%.03X", i);
123 aml_append(dev, aml_name_decl("_HID", aml_string("ACPI0007")));
124 aml_append(dev, aml_name_decl("_UID",
125 aml_int(arch_ids->cpus[i].arch_id)));
126
127 /* build _MAT object */
128 riscv_acpi_madt_add_rintc(i, arch_ids, madt_buf, s);
129 aml_append(dev, aml_name_decl("_MAT",
130 aml_buffer(madt_buf->len,
131 (uint8_t *)madt_buf->data)));
132 g_array_free(madt_buf, true);
133
134 aml_append(scope, dev);
135 }
136 }
137
138 static void
139 acpi_dsdt_add_uart(Aml *scope, const MemMapEntry *uart_memmap,
140 uint32_t uart_irq)
141 {
142 Aml *dev = aml_device("COM0");
143 aml_append(dev, aml_name_decl("_HID", aml_string("PNP0501")));
144 aml_append(dev, aml_name_decl("_UID", aml_int(0)));
145
146 Aml *crs = aml_resource_template();
147 aml_append(crs, aml_memory32_fixed(uart_memmap->base,
148 uart_memmap->size, AML_READ_WRITE));
149 aml_append(crs,
150 aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH,
151 AML_EXCLUSIVE, &uart_irq, 1));
152 aml_append(dev, aml_name_decl("_CRS", crs));
153
154 Aml *pkg = aml_package(2);
155 aml_append(pkg, aml_string("clock-frequency"));
156 aml_append(pkg, aml_int(3686400));
157
158 Aml *UUID = aml_touuid("DAFFD814-6EBA-4D8C-8A91-BC9BBF4AA301");
159
160 Aml *pkg1 = aml_package(1);
161 aml_append(pkg1, pkg);
162
163 Aml *package = aml_package(2);
164 aml_append(package, UUID);
165 aml_append(package, pkg1);
166
167 aml_append(dev, aml_name_decl("_DSD", package));
168 aml_append(scope, dev);
169 }
170
171 /* RHCT Node[N] starts at offset 56 */
172 #define RHCT_NODE_ARRAY_OFFSET 56
173
174 /*
175 * ACPI spec, Revision 6.5+
176 * 5.2.36 RISC-V Hart Capabilities Table (RHCT)
177 * REF: https://github.com/riscv-non-isa/riscv-acpi/issues/16
178 * https://drive.google.com/file/d/1nP3nFiH4jkPMp6COOxP6123DCZKR-tia/view
179 * https://drive.google.com/file/d/1sKbOa8m1UZw1JkquZYe3F1zQBN1xXsaf/view
180 */
181 static void build_rhct(GArray *table_data,
182 BIOSLinker *linker,
183 RISCVVirtState *s)
184 {
185 MachineClass *mc = MACHINE_GET_CLASS(s);
186 MachineState *ms = MACHINE(s);
187 const CPUArchIdList *arch_ids = mc->possible_cpu_arch_ids(ms);
188 size_t len, aligned_len;
189 uint32_t isa_offset, num_rhct_nodes, cmo_offset = 0;
190 RISCVCPU *cpu = &s->soc[0].harts[0];
191 uint32_t mmu_offset = 0;
192 uint8_t satp_mode_max;
193 char *isa;
194
195 AcpiTable table = { .sig = "RHCT", .rev = 1, .oem_id = s->oem_id,
196 .oem_table_id = s->oem_table_id };
197
198 acpi_table_begin(&table, table_data);
199
200 build_append_int_noprefix(table_data, 0x0, 4); /* Reserved */
201
202 /* Time Base Frequency */
203 build_append_int_noprefix(table_data,
204 RISCV_ACLINT_DEFAULT_TIMEBASE_FREQ, 8);
205
206 /* ISA + N hart info */
207 num_rhct_nodes = 1 + ms->smp.cpus;
208 if (cpu->cfg.ext_zicbom || cpu->cfg.ext_zicboz) {
209 num_rhct_nodes++;
210 }
211
212 if (cpu->cfg.satp_mode.supported != 0) {
213 num_rhct_nodes++;
214 }
215
216 /* Number of RHCT nodes*/
217 build_append_int_noprefix(table_data, num_rhct_nodes, 4);
218
219 /* Offset to the RHCT node array */
220 build_append_int_noprefix(table_data, RHCT_NODE_ARRAY_OFFSET, 4);
221
222 /* ISA String Node */
223 isa_offset = table_data->len - table.table_offset;
224 build_append_int_noprefix(table_data, 0, 2); /* Type 0 */
225
226 isa = riscv_isa_string(cpu);
227 len = 8 + strlen(isa) + 1;
228 aligned_len = (len % 2) ? (len + 1) : len;
229
230 build_append_int_noprefix(table_data, aligned_len, 2); /* Length */
231 build_append_int_noprefix(table_data, 0x1, 2); /* Revision */
232
233 /* ISA string length including NUL */
234 build_append_int_noprefix(table_data, strlen(isa) + 1, 2);
235 g_array_append_vals(table_data, isa, strlen(isa) + 1); /* ISA string */
236
237 if (aligned_len != len) {
238 build_append_int_noprefix(table_data, 0x0, 1); /* Optional Padding */
239 }
240
241 /* CMO node */
242 if (cpu->cfg.ext_zicbom || cpu->cfg.ext_zicboz) {
243 cmo_offset = table_data->len - table.table_offset;
244 build_append_int_noprefix(table_data, 1, 2); /* Type */
245 build_append_int_noprefix(table_data, 10, 2); /* Length */
246 build_append_int_noprefix(table_data, 0x1, 2); /* Revision */
247 build_append_int_noprefix(table_data, 0, 1); /* Reserved */
248
249 /* CBOM block size */
250 if (cpu->cfg.cbom_blocksize) {
251 build_append_int_noprefix(table_data,
252 __builtin_ctz(cpu->cfg.cbom_blocksize),
253 1);
254 } else {
255 build_append_int_noprefix(table_data, 0, 1);
256 }
257
258 /* CBOP block size */
259 build_append_int_noprefix(table_data, 0, 1);
260
261 /* CBOZ block size */
262 if (cpu->cfg.cboz_blocksize) {
263 build_append_int_noprefix(table_data,
264 __builtin_ctz(cpu->cfg.cboz_blocksize),
265 1);
266 } else {
267 build_append_int_noprefix(table_data, 0, 1);
268 }
269 }
270
271 /* MMU node structure */
272 if (cpu->cfg.satp_mode.supported != 0) {
273 satp_mode_max = satp_mode_max_from_map(cpu->cfg.satp_mode.map);
274 mmu_offset = table_data->len - table.table_offset;
275 build_append_int_noprefix(table_data, 2, 2); /* Type */
276 build_append_int_noprefix(table_data, 8, 2); /* Length */
277 build_append_int_noprefix(table_data, 0x1, 2); /* Revision */
278 build_append_int_noprefix(table_data, 0, 1); /* Reserved */
279 /* MMU Type */
280 if (satp_mode_max == VM_1_10_SV57) {
281 build_append_int_noprefix(table_data, 2, 1); /* Sv57 */
282 } else if (satp_mode_max == VM_1_10_SV48) {
283 build_append_int_noprefix(table_data, 1, 1); /* Sv48 */
284 } else if (satp_mode_max == VM_1_10_SV39) {
285 build_append_int_noprefix(table_data, 0, 1); /* Sv39 */
286 } else {
287 assert(1);
288 }
289 }
290
291 /* Hart Info Node */
292 for (int i = 0; i < arch_ids->len; i++) {
293 len = 16;
294 int num_offsets = 1;
295 build_append_int_noprefix(table_data, 0xFFFF, 2); /* Type */
296
297 /* Length */
298 if (cmo_offset) {
299 len += 4;
300 num_offsets++;
301 }
302
303 if (mmu_offset) {
304 len += 4;
305 num_offsets++;
306 }
307
308 build_append_int_noprefix(table_data, len, 2);
309 build_append_int_noprefix(table_data, 0x1, 2); /* Revision */
310 /* Number of offsets */
311 build_append_int_noprefix(table_data, num_offsets, 2);
312 build_append_int_noprefix(table_data, i, 4); /* ACPI Processor UID */
313 /* Offsets */
314 build_append_int_noprefix(table_data, isa_offset, 4);
315 if (cmo_offset) {
316 build_append_int_noprefix(table_data, cmo_offset, 4);
317 }
318
319 if (mmu_offset) {
320 build_append_int_noprefix(table_data, mmu_offset, 4);
321 }
322 }
323
324 acpi_table_end(linker, &table);
325 }
326
327 /* FADT */
328 static void build_fadt_rev6(GArray *table_data,
329 BIOSLinker *linker,
330 RISCVVirtState *s,
331 unsigned dsdt_tbl_offset)
332 {
333 AcpiFadtData fadt = {
334 .rev = 6,
335 .minor_ver = 5,
336 .flags = 1 << ACPI_FADT_F_HW_REDUCED_ACPI,
337 .xdsdt_tbl_offset = &dsdt_tbl_offset,
338 };
339
340 build_fadt(table_data, linker, &fadt, s->oem_id, s->oem_table_id);
341 }
342
343 /* DSDT */
344 static void build_dsdt(GArray *table_data,
345 BIOSLinker *linker,
346 RISCVVirtState *s)
347 {
348 Aml *scope, *dsdt;
349 MachineState *ms = MACHINE(s);
350 uint8_t socket_count;
351 const MemMapEntry *memmap = s->memmap;
352 AcpiTable table = { .sig = "DSDT", .rev = 2, .oem_id = s->oem_id,
353 .oem_table_id = s->oem_table_id };
354
355
356 acpi_table_begin(&table, table_data);
357 dsdt = init_aml_allocator();
358
359 /*
360 * When booting the VM with UEFI, UEFI takes ownership of the RTC hardware.
361 * While UEFI can use libfdt to disable the RTC device node in the DTB that
362 * it passes to the OS, it cannot modify AML. Therefore, we won't generate
363 * the RTC ACPI device at all when using UEFI.
364 */
365 scope = aml_scope("\\_SB");
366 acpi_dsdt_add_cpus(scope, s);
367
368 fw_cfg_acpi_dsdt_add(scope, &memmap[VIRT_FW_CFG]);
369
370 socket_count = riscv_socket_count(ms);
371
372 acpi_dsdt_add_uart(scope, &memmap[VIRT_UART0], UART0_IRQ);
373
374 if (socket_count == 1) {
375 virtio_acpi_dsdt_add(scope, memmap[VIRT_VIRTIO].base,
376 memmap[VIRT_VIRTIO].size,
377 VIRTIO_IRQ, 0, VIRTIO_COUNT);
378 acpi_dsdt_add_gpex_host(scope, PCIE_IRQ);
379 } else if (socket_count == 2) {
380 virtio_acpi_dsdt_add(scope, memmap[VIRT_VIRTIO].base,
381 memmap[VIRT_VIRTIO].size,
382 VIRTIO_IRQ + VIRT_IRQCHIP_NUM_SOURCES, 0,
383 VIRTIO_COUNT);
384 acpi_dsdt_add_gpex_host(scope, PCIE_IRQ + VIRT_IRQCHIP_NUM_SOURCES);
385 } else {
386 virtio_acpi_dsdt_add(scope, memmap[VIRT_VIRTIO].base,
387 memmap[VIRT_VIRTIO].size,
388 VIRTIO_IRQ + VIRT_IRQCHIP_NUM_SOURCES, 0,
389 VIRTIO_COUNT);
390 acpi_dsdt_add_gpex_host(scope, PCIE_IRQ + VIRT_IRQCHIP_NUM_SOURCES * 2);
391 }
392
393 aml_append(dsdt, scope);
394
395 /* copy AML table into ACPI tables blob and patch header there */
396 g_array_append_vals(table_data, dsdt->buf->data, dsdt->buf->len);
397
398 acpi_table_end(linker, &table);
399 free_aml_allocator();
400 }
401
402 /*
403 * ACPI spec, Revision 6.5+
404 * 5.2.12 Multiple APIC Description Table (MADT)
405 * REF: https://github.com/riscv-non-isa/riscv-acpi/issues/15
406 * https://drive.google.com/file/d/1R6k4MshhN3WTT-hwqAquu5nX6xSEqK2l/view
407 * https://drive.google.com/file/d/1oMGPyOD58JaPgMl1pKasT-VKsIKia7zR/view
408 */
/*
 * Build the MADT.  Emits one RINTC structure per possible hart, a
 * single IMSIC structure when the machine uses APLIC+IMSIC, and one
 * APLIC structure per socket whenever an AIA irqchip is present.
 */
static void build_madt(GArray *table_data,
                       BIOSLinker *linker,
                       RISCVVirtState *s)
{
    MachineClass *mc = MACHINE_GET_CLASS(s);
    MachineState *ms = MACHINE(s);
    const CPUArchIdList *arch_ids = mc->possible_cpu_arch_ids(ms);
    uint8_t group_index_bits = imsic_num_bits(riscv_socket_count(ms));
    uint8_t guest_index_bits = imsic_num_bits(s->aia_guests + 1);
    uint16_t imsic_max_hart_per_socket = 0;
    uint8_t hart_index_bits;
    uint64_t aplic_addr;
    uint32_t gsi_base;
    uint8_t socket;

    /*
     * The IMSIC hart-index field must be wide enough for the largest
     * socket, so find the maximum hart count across all sockets.
     */
    for (socket = 0; socket < riscv_socket_count(ms); socket++) {
        if (imsic_max_hart_per_socket < s->soc[socket].num_harts) {
            imsic_max_hart_per_socket = s->soc[socket].num_harts;
        }
    }

    hart_index_bits = imsic_num_bits(imsic_max_hart_per_socket);

    AcpiTable table = { .sig = "APIC", .rev = 6, .oem_id = s->oem_id,
                        .oem_table_id = s->oem_table_id };

    acpi_table_begin(&table, table_data);
    /* Local Interrupt Controller Address (not used on RISC-V) */
    build_append_int_noprefix(table_data, 0, 4);
    build_append_int_noprefix(table_data, 0, 4);   /* MADT Flags */

    /* RISC-V Local INTC (RINTC) structures, one per HART */
    for (int i = 0; i < arch_ids->len; i++) {
        riscv_acpi_madt_add_rintc(i, arch_ids, table_data, s);
    }

    /* IMSIC structure (type 0x19), only in MSI mode */
    if (s->aia_type == VIRT_AIA_TYPE_APLIC_IMSIC) {
        build_append_int_noprefix(table_data, 0x19, 1);   /* Type */
        build_append_int_noprefix(table_data, 16, 1);     /* Length */
        build_append_int_noprefix(table_data, 1, 1);      /* Version */
        build_append_int_noprefix(table_data, 0, 1);      /* Reserved */
        build_append_int_noprefix(table_data, 0, 4);      /* Flags */
        /* Number of supervisor mode Interrupt Identities */
        build_append_int_noprefix(table_data, VIRT_IRQCHIP_NUM_MSIS, 2);
        /* Number of guest mode Interrupt Identities */
        build_append_int_noprefix(table_data, VIRT_IRQCHIP_NUM_MSIS, 2);
        /* Guest Index Bits */
        build_append_int_noprefix(table_data, guest_index_bits, 1);
        /* Hart Index Bits */
        build_append_int_noprefix(table_data, hart_index_bits, 1);
        /* Group Index Bits */
        build_append_int_noprefix(table_data, group_index_bits, 1);
        /* Group Index Shift */
        build_append_int_noprefix(table_data, IMSIC_MMIO_GROUP_MIN_SHIFT, 1);
    }

    if (s->aia_type != VIRT_AIA_TYPE_NONE) {
        /* APLIC structures (type 0x1A), one per socket */
        for (socket = 0; socket < riscv_socket_count(ms); socket++) {
            aplic_addr = s->memmap[VIRT_APLIC_S].base +
                         s->memmap[VIRT_APLIC_S].size * socket;
            gsi_base = VIRT_IRQCHIP_NUM_SOURCES * socket;
            build_append_int_noprefix(table_data, 0x1A, 1);   /* Type */
            build_append_int_noprefix(table_data, 36, 1);     /* Length */
            build_append_int_noprefix(table_data, 1, 1);      /* Version */
            build_append_int_noprefix(table_data, socket, 1); /* APLIC ID */
            build_append_int_noprefix(table_data, 0, 4);      /* Flags */
            build_append_int_noprefix(table_data, 0, 8);      /* Hardware ID */
            /* Number of IDCs: only wired mode has per-hart delivery ctrls */
            if (s->aia_type == VIRT_AIA_TYPE_APLIC) {
                build_append_int_noprefix(table_data,
                                          s->soc[socket].num_harts,
                                          2);
            } else {
                build_append_int_noprefix(table_data, 0, 2);
            }
            /* Total External Interrupt Sources Supported */
            build_append_int_noprefix(table_data, VIRT_IRQCHIP_NUM_SOURCES, 2);
            /* Global System Interrupt Base */
            build_append_int_noprefix(table_data, gsi_base, 4);
            /* APLIC Address */
            build_append_int_noprefix(table_data, aplic_addr, 8);
            /* APLIC size */
            build_append_int_noprefix(table_data,
                                      s->memmap[VIRT_APLIC_S].size, 4);
        }
    }

    acpi_table_end(linker, &table);
}
501
502 static void virt_acpi_build(RISCVVirtState *s, AcpiBuildTables *tables)
503 {
504 GArray *table_offsets;
505 unsigned dsdt, xsdt;
506 GArray *tables_blob = tables->table_data;
507
508 table_offsets = g_array_new(false, true,
509 sizeof(uint32_t));
510
511 bios_linker_loader_alloc(tables->linker,
512 ACPI_BUILD_TABLE_FILE, tables_blob,
513 64, false);
514
515 /* DSDT is pointed to by FADT */
516 dsdt = tables_blob->len;
517 build_dsdt(tables_blob, tables->linker, s);
518
519 /* FADT and others pointed to by XSDT */
520 acpi_add_table(table_offsets, tables_blob);
521 build_fadt_rev6(tables_blob, tables->linker, s, dsdt);
522
523 acpi_add_table(table_offsets, tables_blob);
524 build_madt(tables_blob, tables->linker, s);
525
526 acpi_add_table(table_offsets, tables_blob);
527 build_rhct(tables_blob, tables->linker, s);
528
529 acpi_add_table(table_offsets, tables_blob);
530 {
531 AcpiMcfgInfo mcfg = {
532 .base = s->memmap[VIRT_PCIE_MMIO].base,
533 .size = s->memmap[VIRT_PCIE_MMIO].size,
534 };
535 build_mcfg(tables_blob, tables->linker, &mcfg, s->oem_id,
536 s->oem_table_id);
537 }
538
539 /* XSDT is pointed to by RSDP */
540 xsdt = tables_blob->len;
541 build_xsdt(tables_blob, tables->linker, table_offsets, s->oem_id,
542 s->oem_table_id);
543
544 /* RSDP is in FSEG memory, so allocate it separately */
545 {
546 AcpiRsdpData rsdp_data = {
547 .revision = 2,
548 .oem_id = s->oem_id,
549 .xsdt_tbl_offset = &xsdt,
550 .rsdt_tbl_offset = NULL,
551 };
552 build_rsdp(tables->rsdp, tables->linker, &rsdp_data);
553 }
554
555 /*
556 * The align size is 128, warn if 64k is not enough therefore
557 * the align size could be resized.
558 */
559 if (tables_blob->len > ACPI_BUILD_TABLE_SIZE / 2) {
560 warn_report("ACPI table size %u exceeds %d bytes,"
561 " migration may not work",
562 tables_blob->len, ACPI_BUILD_TABLE_SIZE / 2);
563 error_printf("Try removing some objects.");
564 }
565
566 acpi_align_size(tables_blob, ACPI_BUILD_TABLE_SIZE);
567
568 /* Clean up memory that's no longer used */
569 g_array_free(table_offsets, true);
570 }
571
572 static void acpi_ram_update(MemoryRegion *mr, GArray *data)
573 {
574 uint32_t size = acpi_data_len(data);
575
576 /*
577 * Make sure RAM size is correct - in case it got changed
578 * e.g. by migration
579 */
580 memory_region_ram_resize(mr, size, &error_abort);
581
582 memcpy(memory_region_get_ram_ptr(mr), data->data, size);
583 memory_region_set_dirty(mr, 0, size);
584 }
585
586 static void virt_acpi_build_update(void *build_opaque)
587 {
588 AcpiBuildState *build_state = build_opaque;
589 AcpiBuildTables tables;
590
591 /* No state to update or already patched? Nothing to do. */
592 if (!build_state || build_state->patched) {
593 return;
594 }
595
596 build_state->patched = true;
597
598 acpi_build_tables_init(&tables);
599
600 virt_acpi_build(RISCV_VIRT_MACHINE(qdev_get_machine()), &tables);
601
602 acpi_ram_update(build_state->table_mr, tables.table_data);
603 acpi_ram_update(build_state->rsdp_mr, tables.rsdp);
604 acpi_ram_update(build_state->linker_mr, tables.linker->cmd_blob);
605
606 acpi_build_tables_cleanup(&tables, true);
607 }
608
609 static void virt_acpi_build_reset(void *build_opaque)
610 {
611 AcpiBuildState *build_state = build_opaque;
612 build_state->patched = false;
613 }
614
/*
 * Migration state for the ACPI build machinery: only the 'patched'
 * flag is live state (the blobs themselves travel as ROM blobs).
 */
static const VMStateDescription vmstate_virt_acpi_build = {
    .name = "virt_acpi_build",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_BOOL(patched, AcpiBuildState),
        VMSTATE_END_OF_LIST()
    },
};
624
/*
 * Machine-init entry point: build the ACPI tables once and expose them
 * to the guest as fw_cfg ROM blobs.  Subsequent rebuilds happen through
 * the virt_acpi_build_update() callback registered with each blob.
 */
void virt_acpi_setup(RISCVVirtState *s)
{
    AcpiBuildTables tables;
    AcpiBuildState *build_state;

    build_state = g_malloc0(sizeof *build_state);

    acpi_build_tables_init(&tables);
    virt_acpi_build(s, &tables);

    /* Now expose it all to Guest */
    build_state->table_mr = acpi_add_rom_blob(virt_acpi_build_update,
                                              build_state, tables.table_data,
                                              ACPI_BUILD_TABLE_FILE);
    assert(build_state->table_mr != NULL);

    build_state->linker_mr = acpi_add_rom_blob(virt_acpi_build_update,
                                               build_state,
                                               tables.linker->cmd_blob,
                                               ACPI_BUILD_LOADER_FILE);

    build_state->rsdp_mr = acpi_add_rom_blob(virt_acpi_build_update,
                                             build_state, tables.rsdp,
                                             ACPI_BUILD_RSDP_FILE);

    /* Arm the one-rebuild-per-reset latch and register its state */
    qemu_register_reset(virt_acpi_build_reset, build_state);
    virt_acpi_build_reset(build_state);
    vmstate_register(NULL, 0, &vmstate_virt_acpi_build, build_state);

    /*
     * Clean up tables but don't free the memory: we track it
     * in build_state.
     */
    acpi_build_tables_cleanup(&tables, false);
}