hw/ppc/spapr_events.c (mirror_qemu.git)
/*
 * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator
 *
 * RTAS events handling
 *
 * Copyright (c) 2012 David Gibson, IBM Corporation.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "cpu.h"
#include "sysemu/sysemu.h"
#include "hw/qdev.h"
#include "sysemu/device_tree.h"

#include "hw/ppc/fdt.h"
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_vio.h"
#include "hw/pci/pci.h"
#include "hw/pci-host/spapr.h"
#include "hw/ppc/spapr_drc.h"
#include "qemu/help_option.h"
#include "qemu/bcd.h"
#include "hw/ppc/spapr_ovec.h"
#include <libfdt.h>

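/*
 * Fixed header at the start of every RTAS error/event log.  The summary
 * word packs the log version, severity, disposition, initiator, target
 * and type fields; extended_length gives the size of the (version 6)
 * extended log that follows this header.
 */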
struct rtas_error_log {
    uint32_t summary;
#define RTAS_LOG_VERSION_MASK 0xff000000
#define RTAS_LOG_VERSION_6 0x06000000
#define RTAS_LOG_SEVERITY_MASK 0x00e00000
#define RTAS_LOG_SEVERITY_ALREADY_REPORTED 0x00c00000
#define RTAS_LOG_SEVERITY_FATAL 0x00a00000
#define RTAS_LOG_SEVERITY_ERROR 0x00800000
#define RTAS_LOG_SEVERITY_ERROR_SYNC 0x00600000
#define RTAS_LOG_SEVERITY_WARNING 0x00400000
#define RTAS_LOG_SEVERITY_EVENT 0x00200000
#define RTAS_LOG_SEVERITY_NO_ERROR 0x00000000
#define RTAS_LOG_DISPOSITION_MASK 0x00180000
#define RTAS_LOG_DISPOSITION_FULLY_RECOVERED 0x00000000
#define RTAS_LOG_DISPOSITION_LIMITED_RECOVERY 0x00080000
#define RTAS_LOG_DISPOSITION_NOT_RECOVERED 0x00100000
#define RTAS_LOG_OPTIONAL_PART_PRESENT 0x00040000
#define RTAS_LOG_INITIATOR_MASK 0x0000f000
#define RTAS_LOG_INITIATOR_UNKNOWN 0x00000000
#define RTAS_LOG_INITIATOR_CPU 0x00001000
#define RTAS_LOG_INITIATOR_PCI 0x00002000
#define RTAS_LOG_INITIATOR_MEMORY 0x00004000
#define RTAS_LOG_INITIATOR_HOTPLUG 0x00006000
#define RTAS_LOG_TARGET_MASK 0x00000f00
#define RTAS_LOG_TARGET_UNKNOWN 0x00000000
#define RTAS_LOG_TARGET_CPU 0x00000100
#define RTAS_LOG_TARGET_PCI 0x00000200
#define RTAS_LOG_TARGET_MEMORY 0x00000400
#define RTAS_LOG_TARGET_HOTPLUG 0x00000600
#define RTAS_LOG_TYPE_MASK 0x000000ff
#define RTAS_LOG_TYPE_OTHER 0x00000000
#define RTAS_LOG_TYPE_RETRY 0x00000001
#define RTAS_LOG_TYPE_TCE_ERR 0x00000002
#define RTAS_LOG_TYPE_INTERN_DEV_FAIL 0x00000003
#define RTAS_LOG_TYPE_TIMEOUT 0x00000004
#define RTAS_LOG_TYPE_DATA_PARITY 0x00000005
#define RTAS_LOG_TYPE_ADDR_PARITY 0x00000006
#define RTAS_LOG_TYPE_CACHE_PARITY 0x00000007
#define RTAS_LOG_TYPE_ADDR_INVALID 0x00000008
#define RTAS_LOG_TYPE_ECC_UNCORR 0x00000009
#define RTAS_LOG_TYPE_ECC_CORR 0x0000000a
#define RTAS_LOG_TYPE_EPOW 0x00000040
#define RTAS_LOG_TYPE_HOTPLUG 0x000000e5
    uint32_t extended_length;
} QEMU_PACKED;

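/*
 * Header of the version 6 extended log that follows the fixed header
 * above.  It is followed by a sequence of sections, each beginning with
 * a rtas_event_log_v6_section_header; Main-A carries creation time and
 * the platform log id, Main-B carries subsystem and severity information.
 */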
struct rtas_event_log_v6 {
    uint8_t b0;
#define RTAS_LOG_V6_B0_VALID 0x80
#define RTAS_LOG_V6_B0_UNRECOVERABLE_ERROR 0x40
#define RTAS_LOG_V6_B0_RECOVERABLE_ERROR 0x20
#define RTAS_LOG_V6_B0_DEGRADED_OPERATION 0x10
#define RTAS_LOG_V6_B0_PREDICTIVE_ERROR 0x08
#define RTAS_LOG_V6_B0_NEW_LOG 0x04
#define RTAS_LOG_V6_B0_BIGENDIAN 0x02
    uint8_t _resv1;
    uint8_t b2;
#define RTAS_LOG_V6_B2_POWERPC_FORMAT 0x80
#define RTAS_LOG_V6_B2_LOG_FORMAT_MASK 0x0f
#define RTAS_LOG_V6_B2_LOG_FORMAT_PLATFORM_EVENT 0x0e
    uint8_t _resv2[9];
    uint32_t company;
#define RTAS_LOG_V6_COMPANY_IBM 0x49424d00 /* IBM<null> */
} QEMU_PACKED;

struct rtas_event_log_v6_section_header {
    uint16_t section_id;
    uint16_t section_length;
    uint8_t section_version;
    uint8_t section_subtype;
    uint16_t creator_component_id;
} QEMU_PACKED;

struct rtas_event_log_v6_maina {
#define RTAS_LOG_V6_SECTION_ID_MAINA 0x5048 /* PH */
    struct rtas_event_log_v6_section_header hdr;
    uint32_t creation_date; /* BCD: YYYYMMDD */
    uint32_t creation_time; /* BCD: HHMMSS00 */
    uint8_t _platform1[8];
    char creator_id;
    uint8_t _resv1[2];
    uint8_t section_count;
    uint8_t _resv2[4];
    uint8_t _platform2[8];
    uint32_t plid;
    uint8_t _platform3[4];
} QEMU_PACKED;

struct rtas_event_log_v6_mainb {
#define RTAS_LOG_V6_SECTION_ID_MAINB 0x5548 /* UH */
    struct rtas_event_log_v6_section_header hdr;
    uint8_t subsystem_id;
    uint8_t _platform1;
    uint8_t event_severity;
    uint8_t event_subtype;
    uint8_t _platform2[4];
    uint8_t _resv1[2];
    uint16_t action_flags;
    uint8_t _resv2[4];
} QEMU_PACKED;

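/*
 * EPOW (Environmental and Power Warning) section.  The sensor value
 * encodes the action being requested of the guest (e.g. system shutdown)
 * and the modifier fields qualify the cause.
 */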
struct rtas_event_log_v6_epow {
#define RTAS_LOG_V6_SECTION_ID_EPOW 0x4550 /* EP */
    struct rtas_event_log_v6_section_header hdr;
    uint8_t sensor_value;
#define RTAS_LOG_V6_EPOW_ACTION_RESET 0
#define RTAS_LOG_V6_EPOW_ACTION_WARN_COOLING 1
#define RTAS_LOG_V6_EPOW_ACTION_WARN_POWER 2
#define RTAS_LOG_V6_EPOW_ACTION_SYSTEM_SHUTDOWN 3
#define RTAS_LOG_V6_EPOW_ACTION_SYSTEM_HALT 4
#define RTAS_LOG_V6_EPOW_ACTION_MAIN_ENCLOSURE 5
#define RTAS_LOG_V6_EPOW_ACTION_POWER_OFF 7
    uint8_t event_modifier;
#define RTAS_LOG_V6_EPOW_MODIFIER_NORMAL 1
#define RTAS_LOG_V6_EPOW_MODIFIER_ON_UPS 2
#define RTAS_LOG_V6_EPOW_MODIFIER_CRITICAL 3
#define RTAS_LOG_V6_EPOW_MODIFIER_TEMPERATURE 4
    uint8_t extended_modifier;
#define RTAS_LOG_V6_EPOW_XMODIFIER_SYSTEM_WIDE 0
#define RTAS_LOG_V6_EPOW_XMODIFIER_PARTITION_SPECIFIC 1
    uint8_t _resv;
    uint64_t reason_code;
} QEMU_PACKED;

struct epow_log_full {
    struct rtas_error_log hdr;
    struct rtas_event_log_v6 v6hdr;
    struct rtas_event_log_v6_maina maina;
    struct rtas_event_log_v6_mainb mainb;
    struct rtas_event_log_v6_epow epow;
} QEMU_PACKED;

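/*
 * Identifier of the DRC (Dynamic Reconfiguration Connector) affected by
 * a hotplug event.  Which member is valid depends on the event's
 * hotplug_identifier field: name, index, count, or count plus index.
 */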
union drc_identifier {
    uint32_t index;
    uint32_t count;
    struct {
        uint32_t count;
        uint32_t index;
    } count_indexed;
    char name[1];
} QEMU_PACKED;

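/*
 * Hotplug section: the resource type (CPU, memory, PCI, ...), whether it
 * is being added or removed, and the DRC identifier of the resource.
 */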
struct rtas_event_log_v6_hp {
#define RTAS_LOG_V6_SECTION_ID_HOTPLUG 0x4850 /* HP */
    struct rtas_event_log_v6_section_header hdr;
    uint8_t hotplug_type;
#define RTAS_LOG_V6_HP_TYPE_CPU 1
#define RTAS_LOG_V6_HP_TYPE_MEMORY 2
#define RTAS_LOG_V6_HP_TYPE_SLOT 3
#define RTAS_LOG_V6_HP_TYPE_PHB 4
#define RTAS_LOG_V6_HP_TYPE_PCI 5
    uint8_t hotplug_action;
#define RTAS_LOG_V6_HP_ACTION_ADD 1
#define RTAS_LOG_V6_HP_ACTION_REMOVE 2
    uint8_t hotplug_identifier;
#define RTAS_LOG_V6_HP_ID_DRC_NAME 1
#define RTAS_LOG_V6_HP_ID_DRC_INDEX 2
#define RTAS_LOG_V6_HP_ID_DRC_COUNT 3
#define RTAS_LOG_V6_HP_ID_DRC_COUNT_INDEXED 4
    uint8_t reserved;
    union drc_identifier drc_id;
} QEMU_PACKED;

struct hp_log_full {
    struct rtas_error_log hdr;
    struct rtas_event_log_v6 v6hdr;
    struct rtas_event_log_v6_maina maina;
    struct rtas_event_log_v6_mainb mainb;
    struct rtas_event_log_v6_hp hp;
} QEMU_PACKED;

typedef enum EventClass {
    EVENT_CLASS_INTERNAL_ERRORS = 0,
    EVENT_CLASS_EPOW = 1,
    EVENT_CLASS_RESERVED = 2,
    EVENT_CLASS_HOT_PLUG = 3,
    EVENT_CLASS_IO = 4,
    EVENT_CLASS_MAX
} EventClassIndex;
#define EVENT_CLASS_MASK(index) (1 << (31 - index))

static const char * const event_names[EVENT_CLASS_MAX] = {
    [EVENT_CLASS_INTERNAL_ERRORS] = "internal-errors",
    [EVENT_CLASS_EPOW] = "epow-events",
    [EVENT_CLASS_HOT_PLUG] = "hot-plug-events",
    [EVENT_CLASS_IO] = "ibm,io-events",
};

struct sPAPREventSource {
    int irq;
    uint32_t mask;
    bool enabled;
};

static sPAPREventSource *spapr_event_sources_new(void)
{
    return g_new0(sPAPREventSource, EVENT_CLASS_MAX);
}

static void spapr_event_sources_register(sPAPREventSource *event_sources,
                                         EventClassIndex index, int irq)
{
    /* we only support 1 irq per event class at the moment */
    g_assert(event_sources);
    g_assert(!event_sources[index].enabled);
    event_sources[index].irq = irq;
    event_sources[index].mask = EVENT_CLASS_MASK(index);
    event_sources[index].enabled = true;
}

static const sPAPREventSource *
spapr_event_sources_get_source(sPAPREventSource *event_sources,
                               EventClassIndex index)
{
    g_assert(index < EVENT_CLASS_MAX);
    g_assert(event_sources);

    return &event_sources[index];
}

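/*
 * Build the /event-sources node of the guest device tree: one child node
 * per enabled event class, each carrying its interrupt specifier, plus an
 * "interrupt-ranges" property summarising all of them.
 */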
void spapr_dt_events(sPAPRMachineState *spapr, void *fdt)
{
    uint32_t irq_ranges[EVENT_CLASS_MAX * 2];
    int i, count = 0, event_sources;
    sPAPREventSource *events = spapr->event_sources;

    g_assert(events);

    _FDT(event_sources = fdt_add_subnode(fdt, 0, "event-sources"));

    for (i = 0, count = 0; i < EVENT_CLASS_MAX; i++) {
        int node_offset;
        uint32_t interrupts[2];
        const sPAPREventSource *source =
            spapr_event_sources_get_source(events, i);
        const char *source_name = event_names[i];

        if (!source->enabled) {
            continue;
        }

        interrupts[0] = cpu_to_be32(source->irq);
        interrupts[1] = 0;

        _FDT(node_offset = fdt_add_subnode(fdt, event_sources, source_name));
        _FDT(fdt_setprop(fdt, node_offset, "interrupts", interrupts,
                         sizeof(interrupts)));

        irq_ranges[count++] = interrupts[0];
        irq_ranges[count++] = cpu_to_be32(1);
    }

    irq_ranges[count] = cpu_to_be32(count);
    count++;

    _FDT((fdt_setprop(fdt, event_sources, "interrupt-controller", NULL, 0)));
    _FDT((fdt_setprop_cell(fdt, event_sources, "#interrupt-cells", 2)));
    _FDT((fdt_setprop(fdt, event_sources, "interrupt-ranges",
                      irq_ranges, count * sizeof(uint32_t))));
}

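/*
 * Map an RTAS log type to the event source that should signal it.  Hotplug
 * events use the dedicated hotplug source only if the guest negotiated
 * OV5_HP_EVT at CAS; otherwise they fall back to the legacy EPOW source.
 */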
static const sPAPREventSource *
rtas_event_log_to_source(sPAPRMachineState *spapr, int log_type)
{
    const sPAPREventSource *source;

    g_assert(spapr->event_sources);

    switch (log_type) {
    case RTAS_LOG_TYPE_HOTPLUG:
        source = spapr_event_sources_get_source(spapr->event_sources,
                                                EVENT_CLASS_HOT_PLUG);
        if (spapr_ovec_test(spapr->ov5_cas, OV5_HP_EVT)) {
            g_assert(source->enabled);
            break;
        }
        /* fall back to epow for legacy hotplug interrupt source */
    case RTAS_LOG_TYPE_EPOW:
        source = spapr_event_sources_get_source(spapr->event_sources,
                                                EVENT_CLASS_EPOW);
        break;
    default:
        source = NULL;
    }

    return source;
}

static int rtas_event_log_to_irq(sPAPRMachineState *spapr, int log_type)
{
    const sPAPREventSource *source;

    source = rtas_event_log_to_source(spapr, log_type);
    g_assert(source);
    g_assert(source->enabled);

    return source->irq;
}

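/*
 * Append a fully built error log to the machine's queue of pending
 * events; the guest later fetches it via the check-exception RTAS call.
 */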
static void rtas_event_log_queue(int log_type, void *data)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
    sPAPREventLogEntry *entry = g_new(sPAPREventLogEntry, 1);

    g_assert(data);
    entry->log_type = log_type;
    entry->data = data;
    QTAILQ_INSERT_TAIL(&spapr->pending_events, entry, next);
}

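/*
 * Remove and return the oldest pending event whose source matches the
 * given event mask, or NULL if no such event is queued.
 */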
static sPAPREventLogEntry *rtas_event_log_dequeue(uint32_t event_mask)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
    sPAPREventLogEntry *entry = NULL;

    QTAILQ_FOREACH(entry, &spapr->pending_events, next) {
        const sPAPREventSource *source =
            rtas_event_log_to_source(spapr, entry->log_type);

        if (source->mask & event_mask) {
            break;
        }
    }

    if (entry) {
        QTAILQ_REMOVE(&spapr->pending_events, entry, next);
    }

    return entry;
}

static bool rtas_event_log_contains(uint32_t event_mask)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
    sPAPREventLogEntry *entry = NULL;

    QTAILQ_FOREACH(entry, &spapr->pending_events, next) {
        const sPAPREventSource *source =
            rtas_event_log_to_source(spapr, entry->log_type);

        if (source->mask & event_mask) {
            return true;
        }
    }

    return false;
}

static uint32_t next_plid;

static void spapr_init_v6hdr(struct rtas_event_log_v6 *v6hdr)
{
    v6hdr->b0 = RTAS_LOG_V6_B0_VALID | RTAS_LOG_V6_B0_NEW_LOG
        | RTAS_LOG_V6_B0_BIGENDIAN;
    v6hdr->b2 = RTAS_LOG_V6_B2_POWERPC_FORMAT
        | RTAS_LOG_V6_B2_LOG_FORMAT_PLATFORM_EVENT;
    v6hdr->company = cpu_to_be32(RTAS_LOG_V6_COMPANY_IBM);
}

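/*
 * Fill in the Main-A section: BCD-encoded creation date and time read
 * from the guest RTC, creator id 'H' (hypervisor), the number of
 * sections in the log and a monotonically increasing platform log id.
 */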
static void spapr_init_maina(struct rtas_event_log_v6_maina *maina,
                             int section_count)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
    struct tm tm;
    int year;

    maina->hdr.section_id = cpu_to_be16(RTAS_LOG_V6_SECTION_ID_MAINA);
    maina->hdr.section_length = cpu_to_be16(sizeof(*maina));
    /* FIXME: section version, subtype and creator id? */
    spapr_rtc_read(&spapr->rtc, &tm, NULL);
    year = tm.tm_year + 1900;
    maina->creation_date = cpu_to_be32((to_bcd(year / 100) << 24)
                                       | (to_bcd(year % 100) << 16)
                                       | (to_bcd(tm.tm_mon + 1) << 8)
                                       | to_bcd(tm.tm_mday));
    maina->creation_time = cpu_to_be32((to_bcd(tm.tm_hour) << 24)
                                       | (to_bcd(tm.tm_min) << 16)
                                       | (to_bcd(tm.tm_sec) << 8));
    maina->creator_id = 'H'; /* Hypervisor */
    maina->section_count = section_count;
    maina->plid = next_plid++;
}

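/*
 * Powerdown notifier: queue an EPOW "system shutdown" event and pulse
 * the EPOW interrupt so the guest picks it up via check-exception.
 */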
static void spapr_powerdown_req(Notifier *n, void *opaque)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
    struct rtas_error_log *hdr;
    struct rtas_event_log_v6 *v6hdr;
    struct rtas_event_log_v6_maina *maina;
    struct rtas_event_log_v6_mainb *mainb;
    struct rtas_event_log_v6_epow *epow;
    struct epow_log_full *new_epow;

    new_epow = g_malloc0(sizeof(*new_epow));
    hdr = &new_epow->hdr;
    v6hdr = &new_epow->v6hdr;
    maina = &new_epow->maina;
    mainb = &new_epow->mainb;
    epow = &new_epow->epow;

    hdr->summary = cpu_to_be32(RTAS_LOG_VERSION_6
                               | RTAS_LOG_SEVERITY_EVENT
                               | RTAS_LOG_DISPOSITION_NOT_RECOVERED
                               | RTAS_LOG_OPTIONAL_PART_PRESENT
                               | RTAS_LOG_TYPE_EPOW);
    hdr->extended_length = cpu_to_be32(sizeof(*new_epow)
                                       - sizeof(new_epow->hdr));

    spapr_init_v6hdr(v6hdr);
    spapr_init_maina(maina, 3 /* Main-A, Main-B and EPOW */);

    mainb->hdr.section_id = cpu_to_be16(RTAS_LOG_V6_SECTION_ID_MAINB);
    mainb->hdr.section_length = cpu_to_be16(sizeof(*mainb));
    /* FIXME: section version, subtype and creator id? */
    mainb->subsystem_id = 0xa0; /* External environment */
    mainb->event_severity = 0x00; /* Informational / non-error */
    mainb->event_subtype = 0xd0; /* Normal shutdown */

    epow->hdr.section_id = cpu_to_be16(RTAS_LOG_V6_SECTION_ID_EPOW);
    epow->hdr.section_length = cpu_to_be16(sizeof(*epow));
    epow->hdr.section_version = 2; /* includes extended modifier */
    /* FIXME: section subtype and creator id? */
    epow->sensor_value = RTAS_LOG_V6_EPOW_ACTION_SYSTEM_SHUTDOWN;
    epow->event_modifier = RTAS_LOG_V6_EPOW_MODIFIER_NORMAL;
    epow->extended_modifier = RTAS_LOG_V6_EPOW_XMODIFIER_PARTITION_SPECIFIC;

    rtas_event_log_queue(RTAS_LOG_TYPE_EPOW, new_epow);

    qemu_irq_pulse(xics_get_qirq(XICS_FABRIC(spapr),
                                 rtas_event_log_to_irq(spapr,
                                                       RTAS_LOG_TYPE_EPOW)));
}

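/*
 * Build and queue a hotplug event log for the given DRC identifier and
 * action, then pulse the interrupt of whichever source (dedicated
 * hotplug or legacy EPOW) the guest is using for hotplug notifications.
 */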
static void spapr_hotplug_req_event(uint8_t hp_id, uint8_t hp_action,
                                    sPAPRDRConnectorType drc_type,
                                    union drc_identifier *drc_id)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
    struct hp_log_full *new_hp;
    struct rtas_error_log *hdr;
    struct rtas_event_log_v6 *v6hdr;
    struct rtas_event_log_v6_maina *maina;
    struct rtas_event_log_v6_mainb *mainb;
    struct rtas_event_log_v6_hp *hp;

    new_hp = g_malloc0(sizeof(struct hp_log_full));
    hdr = &new_hp->hdr;
    v6hdr = &new_hp->v6hdr;
    maina = &new_hp->maina;
    mainb = &new_hp->mainb;
    hp = &new_hp->hp;

    hdr->summary = cpu_to_be32(RTAS_LOG_VERSION_6
                               | RTAS_LOG_SEVERITY_EVENT
                               | RTAS_LOG_DISPOSITION_NOT_RECOVERED
                               | RTAS_LOG_OPTIONAL_PART_PRESENT
                               | RTAS_LOG_INITIATOR_HOTPLUG
                               | RTAS_LOG_TYPE_HOTPLUG);
    hdr->extended_length = cpu_to_be32(sizeof(*new_hp)
                                       - sizeof(new_hp->hdr));

    spapr_init_v6hdr(v6hdr);
    spapr_init_maina(maina, 3 /* Main-A, Main-B, HP */);

    mainb->hdr.section_id = cpu_to_be16(RTAS_LOG_V6_SECTION_ID_MAINB);
    mainb->hdr.section_length = cpu_to_be16(sizeof(*mainb));
    mainb->subsystem_id = 0x80; /* External environment */
    mainb->event_severity = 0x00; /* Informational / non-error */
    mainb->event_subtype = 0x00; /* Normal shutdown */

    hp->hdr.section_id = cpu_to_be16(RTAS_LOG_V6_SECTION_ID_HOTPLUG);
    hp->hdr.section_length = cpu_to_be16(sizeof(*hp));
    hp->hdr.section_version = 1; /* includes extended modifier */
    hp->hotplug_action = hp_action;
    hp->hotplug_identifier = hp_id;

    switch (drc_type) {
    case SPAPR_DR_CONNECTOR_TYPE_PCI:
        hp->hotplug_type = RTAS_LOG_V6_HP_TYPE_PCI;
        break;
    case SPAPR_DR_CONNECTOR_TYPE_LMB:
        hp->hotplug_type = RTAS_LOG_V6_HP_TYPE_MEMORY;
        break;
    case SPAPR_DR_CONNECTOR_TYPE_CPU:
        hp->hotplug_type = RTAS_LOG_V6_HP_TYPE_CPU;
        break;
    default:
        /* we shouldn't be signaling hotplug events for resources
         * that don't support them
         */
        g_assert(false);
        return;
    }

    if (hp_id == RTAS_LOG_V6_HP_ID_DRC_COUNT) {
        hp->drc_id.count = cpu_to_be32(drc_id->count);
    } else if (hp_id == RTAS_LOG_V6_HP_ID_DRC_INDEX) {
        hp->drc_id.index = cpu_to_be32(drc_id->index);
    } else if (hp_id == RTAS_LOG_V6_HP_ID_DRC_COUNT_INDEXED) {
        /* we should not be using count_indexed value unless the guest
         * supports dedicated hotplug event source
         */
        g_assert(spapr_ovec_test(spapr->ov5_cas, OV5_HP_EVT));
        hp->drc_id.count_indexed.count =
            cpu_to_be32(drc_id->count_indexed.count);
        hp->drc_id.count_indexed.index =
            cpu_to_be32(drc_id->count_indexed.index);
    }

    rtas_event_log_queue(RTAS_LOG_TYPE_HOTPLUG, new_hp);

    qemu_irq_pulse(xics_get_qirq(XICS_FABRIC(spapr),
                                 rtas_event_log_to_irq(spapr,
                                                       RTAS_LOG_TYPE_HOTPLUG)));
}

void spapr_hotplug_req_add_by_index(sPAPRDRConnector *drc)
{
    sPAPRDRConnectorType drc_type = spapr_drc_type(drc);
    union drc_identifier drc_id;

    drc_id.index = spapr_drc_index(drc);
    spapr_hotplug_req_event(RTAS_LOG_V6_HP_ID_DRC_INDEX,
                            RTAS_LOG_V6_HP_ACTION_ADD, drc_type, &drc_id);
}

void spapr_hotplug_req_remove_by_index(sPAPRDRConnector *drc)
{
    sPAPRDRConnectorType drc_type = spapr_drc_type(drc);
    union drc_identifier drc_id;

    drc_id.index = spapr_drc_index(drc);
    spapr_hotplug_req_event(RTAS_LOG_V6_HP_ID_DRC_INDEX,
                            RTAS_LOG_V6_HP_ACTION_REMOVE, drc_type, &drc_id);
}

void spapr_hotplug_req_add_by_count(sPAPRDRConnectorType drc_type,
                                    uint32_t count)
{
    union drc_identifier drc_id;

    drc_id.count = count;
    spapr_hotplug_req_event(RTAS_LOG_V6_HP_ID_DRC_COUNT,
                            RTAS_LOG_V6_HP_ACTION_ADD, drc_type, &drc_id);
}

void spapr_hotplug_req_remove_by_count(sPAPRDRConnectorType drc_type,
                                       uint32_t count)
{
    union drc_identifier drc_id;

    drc_id.count = count;
    spapr_hotplug_req_event(RTAS_LOG_V6_HP_ID_DRC_COUNT,
                            RTAS_LOG_V6_HP_ACTION_REMOVE, drc_type, &drc_id);
}

void spapr_hotplug_req_add_by_count_indexed(sPAPRDRConnectorType drc_type,
                                            uint32_t count, uint32_t index)
{
    union drc_identifier drc_id;

    drc_id.count_indexed.count = count;
    drc_id.count_indexed.index = index;
    spapr_hotplug_req_event(RTAS_LOG_V6_HP_ID_DRC_COUNT_INDEXED,
                            RTAS_LOG_V6_HP_ACTION_ADD, drc_type, &drc_id);
}

void spapr_hotplug_req_remove_by_count_indexed(sPAPRDRConnectorType drc_type,
                                               uint32_t count, uint32_t index)
{
    union drc_identifier drc_id;

    drc_id.count_indexed.count = count;
    drc_id.count_indexed.index = index;
    spapr_hotplug_req_event(RTAS_LOG_V6_HP_ID_DRC_COUNT_INDEXED,
                            RTAS_LOG_V6_HP_ACTION_REMOVE, drc_type, &drc_id);
}

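/*
 * RTAS check-exception call: copy the oldest pending event matching the
 * caller's mask into the supplied buffer.  Because the event interrupts
 * are pulsed (edge-triggered) here, re-pulse the interrupt of any class
 * that still has events queued, as PAPR+ expects the line to remain (or
 * be re-)asserted while events are pending.
 */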
static void check_exception(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                            uint32_t token, uint32_t nargs,
                            target_ulong args,
                            uint32_t nret, target_ulong rets)
{
    uint32_t mask, buf, len, event_len;
    uint64_t xinfo;
    sPAPREventLogEntry *event;
    struct rtas_error_log *hdr;
    int i;

    if ((nargs < 6) || (nargs > 7) || nret != 1) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        return;
    }

    xinfo = rtas_ld(args, 1);
    mask = rtas_ld(args, 2);
    buf = rtas_ld(args, 4);
    len = rtas_ld(args, 5);
    if (nargs == 7) {
        xinfo |= (uint64_t)rtas_ld(args, 6) << 32;
    }

    event = rtas_event_log_dequeue(mask);
    if (!event) {
        goto out_no_events;
    }

    hdr = event->data;
    event_len = be32_to_cpu(hdr->extended_length) + sizeof(*hdr);

    if (event_len < len) {
        len = event_len;
    }

    cpu_physical_memory_write(buf, event->data, len);
    rtas_st(rets, 0, RTAS_OUT_SUCCESS);
    g_free(event->data);
    g_free(event);

    /* according to PAPR+, the IRQ must be left asserted, or re-asserted, if
     * there are still pending events to be fetched via check-exception. We
     * do the latter here, since our code relies on edge-triggered
     * interrupts.
     */
    for (i = 0; i < EVENT_CLASS_MAX; i++) {
        if (rtas_event_log_contains(EVENT_CLASS_MASK(i))) {
            const sPAPREventSource *source =
                spapr_event_sources_get_source(spapr->event_sources, i);

            g_assert(source->enabled);
            qemu_irq_pulse(xics_get_qirq(XICS_FABRIC(spapr), source->irq));
        }
    }

    return;

out_no_events:
    rtas_st(rets, 0, RTAS_OUT_NO_ERRORS_FOUND);
}

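/*
 * RTAS event-scan call: legacy polling interface.  All events are
 * delivered through check-exception instead, so this always reports
 * that no errors were found.
 */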
static void event_scan(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                       uint32_t token, uint32_t nargs,
                       target_ulong args,
                       uint32_t nret, target_ulong rets)
{
    if (nargs != 4 || nret != 1) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        return;
    }
    rtas_st(rets, 0, RTAS_OUT_NO_ERRORS_FOUND);
}

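/*
 * Wire up event reporting at machine init: allocate the event sources,
 * claim an interrupt for EPOW (and for the dedicated hotplug source when
 * enabled), register the powerdown notifier and the check-exception and
 * event-scan RTAS calls.
 */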
void spapr_events_init(sPAPRMachineState *spapr)
{
    QTAILQ_INIT(&spapr->pending_events);

    spapr->event_sources = spapr_event_sources_new();

    spapr_event_sources_register(spapr->event_sources, EVENT_CLASS_EPOW,
                                 spapr_ics_alloc(spapr->ics, 0, false,
                                                 &error_fatal));

    /* NOTE: if machine supports modern/dedicated hotplug event source,
     * we add it to the device-tree unconditionally. This means we may
     * have cases where the source is enabled in QEMU, but unused by the
     * guest because it does not support modern hotplug events, so we
     * take care to rely on checking for negotiation of OV5_HP_EVT option
     * before attempting to use it to signal events, rather than simply
     * checking that it's enabled.
     */
    if (spapr->use_hotplug_event_source) {
        spapr_event_sources_register(spapr->event_sources, EVENT_CLASS_HOT_PLUG,
                                     spapr_ics_alloc(spapr->ics, 0, false,
                                                     &error_fatal));
    }

    spapr->epow_notifier.notify = spapr_powerdown_req;
    qemu_register_powerdown_notifier(&spapr->epow_notifier);
    spapr_rtas_register(RTAS_CHECK_EXCEPTION, "check-exception",
                        check_exception);
    spapr_rtas_register(RTAS_EVENT_SCAN, "event-scan", event_scan);
}