/* hw/acpi/cpu.c */

#include "qemu/osdep.h"
#include "migration/vmstate.h"
#include "hw/acpi/cpu.h"
#include "qapi/error.h"
#include "qapi/qapi-events-acpi.h"
#include "trace.h"
#include "sysemu/numa.h"

#define ACPI_CPU_HOTPLUG_REG_LEN 12
#define ACPI_CPU_SELECTOR_OFFSET_WR 0
#define ACPI_CPU_FLAGS_OFFSET_RW 4
#define ACPI_CPU_CMD_OFFSET_WR 5
#define ACPI_CPU_CMD_DATA_OFFSET_RW 8
#define ACPI_CPU_CMD_DATA2_OFFSET_R 0

#define OVMF_CPUHP_SMI_CMD 4

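/*
 * Register block implemented by cpu_hotplug_rd()/cpu_hotplug_wr() below,
 * ACPI_CPU_HOTPLUG_REG_LEN bytes wide:
 *   0x0  write: CPU selector        read: command data 2 (upper 32 bits)
 *   0x4  read/write: flags of the selected CPU (enabled/insert/remove/...)
 *   0x5  write: command (one of the CPHP_*_CMD values below)
 *   0x8  read/write: command data (lower 32 bits)
 */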
enum {
    CPHP_GET_NEXT_CPU_WITH_EVENT_CMD = 0,
    CPHP_OST_EVENT_CMD = 1,
    CPHP_OST_STATUS_CMD = 2,
    CPHP_GET_CPU_ID_CMD = 3,
    CPHP_CMD_MAX
};

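/*
 * Build an ACPIOSTInfo element describing one CPU slot; used both for
 * the OSPM status query below and for emitting ACPI_DEVICE_OST events.
 */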
static ACPIOSTInfo *acpi_cpu_device_status(int idx, AcpiCpuStatus *cdev)
{
    ACPIOSTInfo *info = g_new0(ACPIOSTInfo, 1);

    info->slot_type = ACPI_SLOT_TYPE_CPU;
    info->slot = g_strdup_printf("%d", idx);
    info->source = cdev->ost_event;
    info->status = cdev->ost_status;
    if (cdev->cpu) {
        DeviceState *dev = DEVICE(cdev->cpu);
        if (dev->id) {
            info->device = g_strdup(dev->id);
        }
    }
    return info;
}

void acpi_cpu_ospm_status(CPUHotplugState *cpu_st, ACPIOSTInfoList ***list)
{
    ACPIOSTInfoList ***tail = list;
    int i;

    for (i = 0; i < cpu_st->dev_count; i++) {
        QAPI_LIST_APPEND(*tail, acpi_cpu_device_status(i, &cpu_st->devs[i]));
    }
}

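/*
 * Guest read handler for the hotplug register block: returns the flags
 * or the command data of the CPU picked by the selector register,
 * depending on the last command written.
 */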
static uint64_t cpu_hotplug_rd(void *opaque, hwaddr addr, unsigned size)
{
    uint64_t val = 0;
    CPUHotplugState *cpu_st = opaque;
    AcpiCpuStatus *cdev;

    if (cpu_st->selector >= cpu_st->dev_count) {
        return val;
    }

    cdev = &cpu_st->devs[cpu_st->selector];
    switch (addr) {
    case ACPI_CPU_FLAGS_OFFSET_RW: /* pack and return is_* fields */
        val |= cdev->cpu ? 1 : 0;
        val |= cdev->is_inserting ? 2 : 0;
        val |= cdev->is_removing ? 4 : 0;
        val |= cdev->fw_remove ? 16 : 0;
        trace_cpuhp_acpi_read_flags(cpu_st->selector, val);
        break;
    case ACPI_CPU_CMD_DATA_OFFSET_RW:
        switch (cpu_st->command) {
        case CPHP_GET_NEXT_CPU_WITH_EVENT_CMD:
            val = cpu_st->selector;
            break;
        case CPHP_GET_CPU_ID_CMD:
            val = cdev->arch_id & 0xFFFFFFFF;
            break;
        default:
            break;
        }
        trace_cpuhp_acpi_read_cmd_data(cpu_st->selector, val);
        break;
    case ACPI_CPU_CMD_DATA2_OFFSET_R:
        switch (cpu_st->command) {
        case CPHP_GET_NEXT_CPU_WITH_EVENT_CMD:
            val = 0;
            break;
        case CPHP_GET_CPU_ID_CMD:
            val = cdev->arch_id >> 32;
            break;
        default:
            break;
        }
        trace_cpuhp_acpi_read_cmd_data2(cpu_st->selector, val);
        break;
    default:
        break;
    }
    return val;
}

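/*
 * Guest write handler: selects a CPU, acknowledges insert/remove events,
 * triggers (firmware-assisted) eject, and latches commands and their data,
 * including _OST event/status which is forwarded as a QMP event.
 */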
static void cpu_hotplug_wr(void *opaque, hwaddr addr, uint64_t data,
                           unsigned int size)
{
    CPUHotplugState *cpu_st = opaque;
    AcpiCpuStatus *cdev;
    ACPIOSTInfo *info;

    assert(cpu_st->dev_count);

    if (addr) {
        if (cpu_st->selector >= cpu_st->dev_count) {
            trace_cpuhp_acpi_invalid_idx_selected(cpu_st->selector);
            return;
        }
    }

    switch (addr) {
    case ACPI_CPU_SELECTOR_OFFSET_WR: /* current CPU selector */
        cpu_st->selector = data;
        trace_cpuhp_acpi_write_idx(cpu_st->selector);
        break;
    case ACPI_CPU_FLAGS_OFFSET_RW: /* set is_* fields */
        cdev = &cpu_st->devs[cpu_st->selector];
        if (data & 2) { /* clear insert event */
            cdev->is_inserting = false;
            trace_cpuhp_acpi_clear_inserting_evt(cpu_st->selector);
        } else if (data & 4) { /* clear remove event */
            cdev->is_removing = false;
            trace_cpuhp_acpi_clear_remove_evt(cpu_st->selector);
        } else if (data & 8) {
            DeviceState *dev = NULL;
            HotplugHandler *hotplug_ctrl = NULL;

            if (!cdev->cpu || cdev->cpu == first_cpu) {
                trace_cpuhp_acpi_ejecting_invalid_cpu(cpu_st->selector);
                break;
            }

            trace_cpuhp_acpi_ejecting_cpu(cpu_st->selector);
            dev = DEVICE(cdev->cpu);
            hotplug_ctrl = qdev_get_hotplug_handler(dev);
            hotplug_handler_unplug(hotplug_ctrl, dev, NULL);
            object_unparent(OBJECT(dev));
            cdev->fw_remove = false;
        } else if (data & 16) {
            if (!cdev->cpu || cdev->cpu == first_cpu) {
                trace_cpuhp_acpi_fw_remove_invalid_cpu(cpu_st->selector);
                break;
            }
            trace_cpuhp_acpi_fw_remove_cpu(cpu_st->selector);
            cdev->fw_remove = true;
        }
        break;
    case ACPI_CPU_CMD_OFFSET_WR:
        trace_cpuhp_acpi_write_cmd(cpu_st->selector, data);
        if (data < CPHP_CMD_MAX) {
            cpu_st->command = data;
            if (cpu_st->command == CPHP_GET_NEXT_CPU_WITH_EVENT_CMD) {
                uint32_t iter = cpu_st->selector;

                do {
                    cdev = &cpu_st->devs[iter];
                    if (cdev->is_inserting || cdev->is_removing ||
                        cdev->fw_remove) {
                        cpu_st->selector = iter;
                        trace_cpuhp_acpi_cpu_has_events(cpu_st->selector,
                            cdev->is_inserting, cdev->is_removing);
                        break;
                    }
                    iter = iter + 1 < cpu_st->dev_count ? iter + 1 : 0;
                } while (iter != cpu_st->selector);
            }
        }
        break;
    case ACPI_CPU_CMD_DATA_OFFSET_RW:
        switch (cpu_st->command) {
        case CPHP_OST_EVENT_CMD: {
            cdev = &cpu_st->devs[cpu_st->selector];
            cdev->ost_event = data;
            trace_cpuhp_acpi_write_ost_ev(cpu_st->selector, cdev->ost_event);
            break;
        }
        case CPHP_OST_STATUS_CMD: {
            cdev = &cpu_st->devs[cpu_st->selector];
            cdev->ost_status = data;
            info = acpi_cpu_device_status(cpu_st->selector, cdev);
            qapi_event_send_acpi_device_ost(info);
            qapi_free_ACPIOSTInfo(info);
            trace_cpuhp_acpi_write_ost_status(cpu_st->selector,
                                              cdev->ost_status);
            break;
        }
        default:
            break;
        }
        break;
    default:
        break;
    }
}

static const MemoryRegionOps cpu_hotplug_ops = {
    .read = cpu_hotplug_rd,
    .write = cpu_hotplug_wr,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 4,
    },
};

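/*
 * Allocate one AcpiCpuStatus slot per possible CPU reported by the machine
 * and map the hotplug register block at base_addr.
 */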
void cpu_hotplug_hw_init(MemoryRegion *as, Object *owner,
                         CPUHotplugState *state, hwaddr base_addr)
{
    MachineState *machine = MACHINE(qdev_get_machine());
    MachineClass *mc = MACHINE_GET_CLASS(machine);
    const CPUArchIdList *id_list;
    int i;

    assert(mc->possible_cpu_arch_ids);
    id_list = mc->possible_cpu_arch_ids(machine);
    state->dev_count = id_list->len;
    state->devs = g_new0(typeof(*state->devs), state->dev_count);
    for (i = 0; i < id_list->len; i++) {
        state->devs[i].cpu = CPU(id_list->cpus[i].cpu);
        state->devs[i].arch_id = id_list->cpus[i].arch_id;
    }
    memory_region_init_io(&state->ctrl_reg, owner, &cpu_hotplug_ops, state,
                          "acpi-cpu-hotplug", ACPI_CPU_HOTPLUG_REG_LEN);
    memory_region_add_subregion(as, base_addr, &state->ctrl_reg);
}

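/* Look up the hotplug slot of a CPU device by its architecture-specific ID. */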
static AcpiCpuStatus *get_cpu_status(CPUHotplugState *cpu_st, DeviceState *dev)
{
    CPUClass *k = CPU_GET_CLASS(dev);
    uint64_t cpu_arch_id = k->get_arch_id(CPU(dev));
    int i;

    for (i = 0; i < cpu_st->dev_count; i++) {
        if (cpu_arch_id == cpu_st->devs[i].arch_id) {
            return &cpu_st->devs[i];
        }
    }
    return NULL;
}

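/*
 * Hotplug handler callbacks: update the slot state and, for guest-visible
 * changes, signal ACPI_CPU_HOTPLUG_STATUS so the guest rescans via the
 * CSCN method.
 */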
void acpi_cpu_plug_cb(HotplugHandler *hotplug_dev,
                      CPUHotplugState *cpu_st, DeviceState *dev, Error **errp)
{
    AcpiCpuStatus *cdev;

    cdev = get_cpu_status(cpu_st, dev);
    if (!cdev) {
        return;
    }

    cdev->cpu = CPU(dev);
    if (dev->hotplugged) {
        cdev->is_inserting = true;
        acpi_send_event(DEVICE(hotplug_dev), ACPI_CPU_HOTPLUG_STATUS);
    }
}

void acpi_cpu_unplug_request_cb(HotplugHandler *hotplug_dev,
                                CPUHotplugState *cpu_st,
                                DeviceState *dev, Error **errp)
{
    AcpiCpuStatus *cdev;

    cdev = get_cpu_status(cpu_st, dev);
    if (!cdev) {
        return;
    }

    cdev->is_removing = true;
    acpi_send_event(DEVICE(hotplug_dev), ACPI_CPU_HOTPLUG_STATUS);
}

void acpi_cpu_unplug_cb(CPUHotplugState *cpu_st,
                        DeviceState *dev, Error **errp)
{
    AcpiCpuStatus *cdev;

    cdev = get_cpu_status(cpu_st, dev);
    if (!cdev) {
        return;
    }

    cdev->cpu = NULL;
}

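/*
 * Migrated state: per-slot event flags plus the global selector and command.
 */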
static const VMStateDescription vmstate_cpuhp_sts = {
    .name = "CPU hotplug device state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(is_inserting, AcpiCpuStatus),
        VMSTATE_BOOL(is_removing, AcpiCpuStatus),
        VMSTATE_UINT32(ost_event, AcpiCpuStatus),
        VMSTATE_UINT32(ost_status, AcpiCpuStatus),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_hotplug = {
    .name = "CPU hotplug state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(selector, CPUHotplugState),
        VMSTATE_UINT8(command, CPUHotplugState),
        VMSTATE_STRUCT_VARRAY_POINTER_UINT32(devs, CPUHotplugState, dev_count,
                                             vmstate_cpuhp_sts, AcpiCpuStatus),
        VMSTATE_END_OF_LIST()
    }
};

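/* AML object, field and method names used by build_cpus_aml() below */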
#define CPU_NAME_FMT      "C%.03X"
#define CPUHP_RES_DEVICE  "PRES"
#define CPU_LOCK          "CPLK"
#define CPU_STS_METHOD    "CSTA"
#define CPU_SCAN_METHOD   "CSCN"
#define CPU_NOTIFY_METHOD "CTFY"
#define CPU_EJECT_METHOD  "CEJ0"
#define CPU_OST_METHOD    "COST"
#define CPU_ADDED_LIST    "CNEW"

#define CPU_ENABLED       "CPEN"
#define CPU_SELECTOR      "CSEL"
#define CPU_COMMAND       "CCMD"
#define CPU_DATA          "CDAT"
#define CPU_INSERT_EVENT  "CINS"
#define CPU_REMOVE_EVENT  "CRMV"
#define CPU_EJECT_EVENT   "CEJ0"
#define CPU_FW_EJECT_EVENT "CEJF"

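/*
 * Generate the CPU hotplug AML: the CPUHP_RES_DEVICE control device under
 * res_root, the \_SB.CPUS container with one object per possible CPU, and
 * the CSCN scan method invoked from event_handler_method.
 */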
void build_cpus_aml(Aml *table, MachineState *machine, CPUHotplugFeatures opts,
                    hwaddr io_base,
                    const char *res_root,
                    const char *event_handler_method)
{
    Aml *ifctx;
    Aml *field;
    Aml *method;
    Aml *cpu_ctrl_dev;
    Aml *cpus_dev;
    Aml *zero = aml_int(0);
    Aml *one = aml_int(1);
    Aml *sb_scope = aml_scope("_SB");
    MachineClass *mc = MACHINE_GET_CLASS(machine);
    const CPUArchIdList *arch_ids = mc->possible_cpu_arch_ids(machine);
    char *cphp_res_path = g_strdup_printf("%s." CPUHP_RES_DEVICE, res_root);
    Object *obj = object_resolve_path_type("", TYPE_ACPI_DEVICE_IF, NULL);
    AcpiDeviceIfClass *adevc = ACPI_DEVICE_IF_GET_CLASS(obj);

    cpu_ctrl_dev = aml_device("%s", cphp_res_path);
    {
        Aml *crs;

        aml_append(cpu_ctrl_dev,
            aml_name_decl("_HID", aml_eisaid("PNP0A06")));
        aml_append(cpu_ctrl_dev,
            aml_name_decl("_UID", aml_string("CPU Hotplug resources")));
        aml_append(cpu_ctrl_dev, aml_mutex(CPU_LOCK, 0));

        crs = aml_resource_template();
        aml_append(crs, aml_io(AML_DECODE16, io_base, io_base, 1,
                               ACPI_CPU_HOTPLUG_REG_LEN));
        aml_append(cpu_ctrl_dev, aml_name_decl("_CRS", crs));

        /* declare CPU hotplug MMIO region with related access fields */
        aml_append(cpu_ctrl_dev,
            aml_operation_region("PRST", AML_SYSTEM_IO, aml_int(io_base),
                                 ACPI_CPU_HOTPLUG_REG_LEN));

        field = aml_field("PRST", AML_BYTE_ACC, AML_NOLOCK,
                          AML_WRITE_AS_ZEROS);
        aml_append(field, aml_reserved_field(ACPI_CPU_FLAGS_OFFSET_RW * 8));
        /* 1 if enabled, read only */
        aml_append(field, aml_named_field(CPU_ENABLED, 1));
        /* (read) 1 if the CPU has an insert event, (write) 1 to clear it */
        aml_append(field, aml_named_field(CPU_INSERT_EVENT, 1));
        /* (read) 1 if the CPU has a remove event, (write) 1 to clear it */
        aml_append(field, aml_named_field(CPU_REMOVE_EVENT, 1));
        /* initiates device eject, write only */
        aml_append(field, aml_named_field(CPU_EJECT_EVENT, 1));
        /* tell firmware to do device eject, write only */
        aml_append(field, aml_named_field(CPU_FW_EJECT_EVENT, 1));
        aml_append(field, aml_reserved_field(3));
        aml_append(field, aml_named_field(CPU_COMMAND, 8));
        aml_append(cpu_ctrl_dev, field);

        field = aml_field("PRST", AML_DWORD_ACC, AML_NOLOCK, AML_PRESERVE);
        /* CPU selector, write only */
        aml_append(field, aml_named_field(CPU_SELECTOR, 32));
        /* flags + cmd + 2byte align */
        aml_append(field, aml_reserved_field(4 * 8));
        aml_append(field, aml_named_field(CPU_DATA, 32));
        aml_append(cpu_ctrl_dev, field);

        if (opts.has_legacy_cphp) {
            method = aml_method("_INI", 0, AML_SERIALIZED);
            /*
             * Switch off the legacy CPU hotplug HW and use the new one.
             * After a reboot the system is already in the new mode, so
             * writing 0 to CPU_SELECTOR just selects the BSP, which is a
             * NOP at the time _INI is called.
             */
            aml_append(method, aml_store(zero, aml_name(CPU_SELECTOR)));
            aml_append(cpu_ctrl_dev, method);
        }
    }
    aml_append(sb_scope, cpu_ctrl_dev);

    cpus_dev = aml_device("\\_SB.CPUS");
    {
        int i;
        Aml *ctrl_lock = aml_name("%s.%s", cphp_res_path, CPU_LOCK);
        Aml *cpu_selector = aml_name("%s.%s", cphp_res_path, CPU_SELECTOR);
        Aml *is_enabled = aml_name("%s.%s", cphp_res_path, CPU_ENABLED);
        Aml *cpu_cmd = aml_name("%s.%s", cphp_res_path, CPU_COMMAND);
        Aml *cpu_data = aml_name("%s.%s", cphp_res_path, CPU_DATA);
        Aml *ins_evt = aml_name("%s.%s", cphp_res_path, CPU_INSERT_EVENT);
        Aml *rm_evt = aml_name("%s.%s", cphp_res_path, CPU_REMOVE_EVENT);
        Aml *ej_evt = aml_name("%s.%s", cphp_res_path, CPU_EJECT_EVENT);
        Aml *fw_ej_evt = aml_name("%s.%s", cphp_res_path, CPU_FW_EJECT_EVENT);

        aml_append(cpus_dev, aml_name_decl("_HID", aml_string("ACPI0010")));
        aml_append(cpus_dev, aml_name_decl("_CID", aml_eisaid("PNP0A05")));

        method = aml_method(CPU_NOTIFY_METHOD, 2, AML_NOTSERIALIZED);
        for (i = 0; i < arch_ids->len; i++) {
            Aml *cpu = aml_name(CPU_NAME_FMT, i);
            Aml *uid = aml_arg(0);
            Aml *event = aml_arg(1);

            ifctx = aml_if(aml_equal(uid, aml_int(i)));
            {
                aml_append(ifctx, aml_notify(cpu, event));
            }
            aml_append(method, ifctx);
        }
        aml_append(cpus_dev, method);

        method = aml_method(CPU_STS_METHOD, 1, AML_SERIALIZED);
        {
            Aml *idx = aml_arg(0);
            Aml *sta = aml_local(0);

            aml_append(method, aml_acquire(ctrl_lock, 0xFFFF));
            aml_append(method, aml_store(idx, cpu_selector));
            aml_append(method, aml_store(zero, sta));
            ifctx = aml_if(aml_equal(is_enabled, one));
            {
                aml_append(ifctx, aml_store(aml_int(0xF), sta));
            }
            aml_append(method, ifctx);
            aml_append(method, aml_release(ctrl_lock));
            aml_append(method, aml_return(sta));
        }
        aml_append(cpus_dev, method);

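        /*
         * CEJ0: either raise the eject event directly or, when firmware
         * handles unplug (fw_unplugs_cpu), set the firmware-eject flag and
         * trigger an SMI.
         */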
        method = aml_method(CPU_EJECT_METHOD, 1, AML_SERIALIZED);
        {
            Aml *idx = aml_arg(0);

            aml_append(method, aml_acquire(ctrl_lock, 0xFFFF));
            aml_append(method, aml_store(idx, cpu_selector));
            if (opts.fw_unplugs_cpu) {
                aml_append(method, aml_store(one, fw_ej_evt));
                aml_append(method, aml_store(aml_int(OVMF_CPUHP_SMI_CMD),
                           aml_name("%s", opts.smi_path)));
            } else {
                aml_append(method, aml_store(one, ej_evt));
            }
            aml_append(method, aml_release(ctrl_lock));
        }
        aml_append(cpus_dev, method);

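        /*
         * CSCN: walk all possible CPUs looking for pending events. Newly
         * inserted CPUs are collected into CPU_ADDED_LIST (in batches of up
         * to 255) and notified after the optional firmware SMI upcall;
         * removals are notified with an eject request inline during the scan.
         */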
        method = aml_method(CPU_SCAN_METHOD, 0, AML_SERIALIZED);
        {
            const uint8_t max_cpus_per_pass = 255;
            Aml *else_ctx;
            Aml *while_ctx, *while_ctx2;
            Aml *has_event = aml_local(0);
            Aml *dev_chk = aml_int(1);
            Aml *eject_req = aml_int(3);
            Aml *next_cpu_cmd = aml_int(CPHP_GET_NEXT_CPU_WITH_EVENT_CMD);
            Aml *num_added_cpus = aml_local(1);
            Aml *cpu_idx = aml_local(2);
            Aml *uid = aml_local(3);
            Aml *has_job = aml_local(4);
            Aml *new_cpus = aml_name(CPU_ADDED_LIST);

            aml_append(method, aml_acquire(ctrl_lock, 0xFFFF));

            /*
             * Windows versions newer than XP (including Windows 10 and
             * Windows Server 2019) do support VarPackageOp, but it is
             * crippled to hold only as many elements as the old PackageOp.
             * For compatibility with Windows XP (so it won't crash), use the
             * ACPI 1.0 PackageOp, which can hold at most 255 elements.
             *
             * Use a named package, as old Windows versions don't support
             * packages in a local variable.
             */
            aml_append(method, aml_name_decl(CPU_ADDED_LIST,
                                             aml_package(max_cpus_per_pass)));

            aml_append(method, aml_store(zero, uid));
            aml_append(method, aml_store(one, has_job));
            /*
             * CPU_ADDED_LIST can hold only a limited number of elements; the
             * outer loop processes CPUs in batches, which lets us handle
             * more CPUs than CPU_ADDED_LIST can hold.
             */
            while_ctx2 = aml_while(aml_equal(has_job, one));
            {
                aml_append(while_ctx2, aml_store(zero, has_job));

                aml_append(while_ctx2, aml_store(one, has_event));
                aml_append(while_ctx2, aml_store(zero, num_added_cpus));

                /*
                 * Scan CPUs until there are no more CPUs with events or
                 * CPU_ADDED_LIST's capacity is exhausted.
                 */
                while_ctx = aml_while(aml_land(aml_equal(has_event, one),
                                      aml_lless(uid, aml_int(arch_ids->len))));
                {
                    /*
                     * Clear the loop exit condition; the ins_evt/rm_evt
                     * checks below set it back to 1 when next_cpu_cmd
                     * returns a CPU with events.
                     */
                    aml_append(while_ctx, aml_store(zero, has_event));

                    aml_append(while_ctx, aml_store(uid, cpu_selector));
                    aml_append(while_ctx, aml_store(next_cpu_cmd, cpu_cmd));

                    /*
                     * Wrap-around case: the scan is complete, exit the loop.
                     * It happens because events are not cleared in the scan
                     * loop, so next_cpu_cmd keeps finding already processed
                     * CPUs.
                     */
                    ifctx = aml_if(aml_lless(cpu_data, uid));
                    {
                        aml_append(ifctx, aml_break());
                    }
                    aml_append(while_ctx, ifctx);

                    /*
                     * If CPU_ADDED_LIST is full, exit the inner loop and
                     * process the collected CPUs.
                     */
                    ifctx = aml_if(
                        aml_equal(num_added_cpus, aml_int(max_cpus_per_pass)));
                    {
                        aml_append(ifctx, aml_store(one, has_job));
                        aml_append(ifctx, aml_break());
                    }
                    aml_append(while_ctx, ifctx);

                    aml_append(while_ctx, aml_store(cpu_data, uid));
                    ifctx = aml_if(aml_equal(ins_evt, one));
                    {
                        /* cache added CPUs to Notify/Wakeup later */
                        aml_append(ifctx, aml_store(uid,
                            aml_index(new_cpus, num_added_cpus)));
                        aml_append(ifctx, aml_increment(num_added_cpus));
                        aml_append(ifctx, aml_store(one, has_event));
                    }
                    aml_append(while_ctx, ifctx);
                    else_ctx = aml_else();
                    ifctx = aml_if(aml_equal(rm_evt, one));
                    {
                        aml_append(ifctx,
                            aml_call2(CPU_NOTIFY_METHOD, uid, eject_req));
                        aml_append(ifctx, aml_store(one, rm_evt));
                        aml_append(ifctx, aml_store(one, has_event));
                    }
                    aml_append(else_ctx, ifctx);
                    aml_append(while_ctx, else_ctx);
                    aml_append(while_ctx, aml_increment(uid));
                }
                aml_append(while_ctx2, while_ctx);

                /*
                 * In case FW negotiated ICH9_LPC_SMI_F_CPU_HOTPLUG_BIT, make
                 * an upcall to FW so it can pull in new CPUs before the OS
                 * is notified and wakes them up.
                 */
                if (opts.smi_path) {
                    ifctx = aml_if(aml_lgreater(num_added_cpus, zero));
                    {
                        aml_append(ifctx, aml_store(aml_int(OVMF_CPUHP_SMI_CMD),
                            aml_name("%s", opts.smi_path)));
                    }
                    aml_append(while_ctx2, ifctx);
                }

                /* Notify OSPM about new CPUs and clear insert events */
                aml_append(while_ctx2, aml_store(zero, cpu_idx));
                while_ctx = aml_while(aml_lless(cpu_idx, num_added_cpus));
                {
                    aml_append(while_ctx,
                        aml_store(aml_derefof(aml_index(new_cpus, cpu_idx)),
                                  uid));
                    aml_append(while_ctx,
                        aml_call2(CPU_NOTIFY_METHOD, uid, dev_chk));
                    aml_append(while_ctx, aml_store(uid, aml_debug()));
                    aml_append(while_ctx, aml_store(uid, cpu_selector));
                    aml_append(while_ctx, aml_store(one, ins_evt));
                    aml_append(while_ctx, aml_increment(cpu_idx));
                }
                aml_append(while_ctx2, while_ctx);
                /*
                 * If another batch is needed, then it will resume scanning
                 * exactly at -- and not after -- the last CPU that's
                 * currently in CPU_ADDED_LIST. In other words, the last CPU
                 * in CPU_ADDED_LIST is going to be re-checked. That's OK:
                 * we've just cleared the insert event for *all* CPUs in
                 * CPU_ADDED_LIST, including the last one. So the scan will
                 * simply seek past it.
                 */
            }
            aml_append(method, while_ctx2);
            aml_append(method, aml_release(ctrl_lock));
        }
        aml_append(cpus_dev, method);

        method = aml_method(CPU_OST_METHOD, 4, AML_SERIALIZED);
        {
            Aml *uid = aml_arg(0);
            Aml *ev_cmd = aml_int(CPHP_OST_EVENT_CMD);
            Aml *st_cmd = aml_int(CPHP_OST_STATUS_CMD);

            aml_append(method, aml_acquire(ctrl_lock, 0xFFFF));
            aml_append(method, aml_store(uid, cpu_selector));
            aml_append(method, aml_store(ev_cmd, cpu_cmd));
            aml_append(method, aml_store(aml_arg(1), cpu_data));
            aml_append(method, aml_store(st_cmd, cpu_cmd));
            aml_append(method, aml_store(aml_arg(2), cpu_data));
            aml_append(method, aml_release(ctrl_lock));
        }
        aml_append(cpus_dev, method);

        /* build Processor object for each processor */
        for (i = 0; i < arch_ids->len; i++) {
            Aml *dev;
            Aml *uid = aml_int(i);
            GArray *madt_buf = g_array_new(0, 1, 1);
            int arch_id = arch_ids->cpus[i].arch_id;

            if (opts.acpi_1_compatible && arch_id < 255) {
                dev = aml_processor(i, 0, 0, CPU_NAME_FMT, i);
            } else {
                dev = aml_device(CPU_NAME_FMT, i);
                aml_append(dev, aml_name_decl("_HID", aml_string("ACPI0007")));
                aml_append(dev, aml_name_decl("_UID", uid));
            }

            method = aml_method("_STA", 0, AML_SERIALIZED);
            aml_append(method, aml_return(aml_call1(CPU_STS_METHOD, uid)));
            aml_append(dev, method);

            /* build _MAT object */
            assert(adevc && adevc->madt_cpu);
            adevc->madt_cpu(i, arch_ids, madt_buf,
                            true); /* set enabled flag */
            aml_append(dev, aml_name_decl("_MAT",
                aml_buffer(madt_buf->len, (uint8_t *)madt_buf->data)));
            g_array_free(madt_buf, true);

            if (CPU(arch_ids->cpus[i].cpu) != first_cpu) {
                method = aml_method("_EJ0", 1, AML_NOTSERIALIZED);
                aml_append(method, aml_call1(CPU_EJECT_METHOD, uid));
                aml_append(dev, method);
            }

            method = aml_method("_OST", 3, AML_SERIALIZED);
            aml_append(method,
                aml_call4(CPU_OST_METHOD, uid, aml_arg(0),
                          aml_arg(1), aml_arg(2))
            );
            aml_append(dev, method);

            /*
             * Linux guests discard SRAT info for non-present CPUs; as a
             * result, _PXM is required for all CPUs which might be
             * hot-plugged. For simplicity, add it for all CPUs.
             */
            if (arch_ids->cpus[i].props.has_node_id) {
                aml_append(dev, aml_name_decl("_PXM",
                           aml_int(arch_ids->cpus[i].props.node_id)));
            }

            aml_append(cpus_dev, dev);
        }
    }
    aml_append(sb_scope, cpus_dev);
    aml_append(table, sb_scope);

    method = aml_method(event_handler_method, 0, AML_NOTSERIALIZED);
    aml_append(method, aml_call0("\\_SB.CPUS." CPU_SCAN_METHOD));
    aml_append(table, method);

    g_free(cphp_res_path);
}