/*
 * hw/s390x/event-facility.c (QEMU source tree), as of commit
 * "qom: Drop parameter @errp of object_property_add() & friends".
 */
1 /*
2 * SCLP
3 * Event Facility
4 * handles SCLP event types
5 * - Signal Quiesce - system power down
6 * - ASCII Console Data - VT220 read and write
7 *
8 * Copyright IBM, Corp. 2012
9 *
10 * Authors:
11 * Heinz Graalfs <graalfs@de.ibm.com>
12 *
13 * This work is licensed under the terms of the GNU GPL, version 2 or (at your
14 * option) any later version. See the COPYING file in the top-level directory.
15 *
16 */
17
18 #include "qemu/osdep.h"
19 #include "qapi/error.h"
20 #include "qemu/module.h"
21
22 #include "hw/s390x/sclp.h"
23 #include "migration/vmstate.h"
24 #include "hw/s390x/event-facility.h"
25
/* Bus that groups all SCLP event devices below the event facility. */
typedef struct SCLPEventsBus {
    BusState qbus;
} SCLPEventsBus;
29
/*
 * We need to save the 64-bit receive mask as two 32-bit chunks for
 * migration compatibility.  These constants are the indices of the
 * lower/upper halves of the mask within receive_mask_pieces[].
 */
#ifdef HOST_WORDS_BIGENDIAN
#define RECV_MASK_LOWER 1
#define RECV_MASK_UPPER 0
#else /* little endian host */
#define RECV_MASK_LOWER 0
#define RECV_MASK_UPPER 1
#endif
38
/* Instance state of the SCLP Event Facility device. */
struct SCLPEventFacility {
    SysBusDevice parent_obj;
    /* bus carrying the individual SCLP event devices */
    SCLPEventsBus sbus;
    /* guest's receive mask */
    union {
        /* 32-bit halves of the mask, migrated as separate fields */
        uint32_t receive_mask_pieces[2];
        sccb_mask_t receive_mask;
    };
    /*
     * when false, we keep the same broken, backwards compatible behaviour as
     * before, allowing only masks of size exactly 4; when true, we implement
     * the architecture correctly, allowing all valid mask sizes. Needed for
     * migration toward older versions.
     */
    bool allow_all_mask_sizes;
    /* length of the receive mask */
    uint16_t mask_length;
};
57
58 /* return true if any child has event pending set */
59 static bool event_pending(SCLPEventFacility *ef)
60 {
61 BusChild *kid;
62 SCLPEvent *event;
63 SCLPEventClass *event_class;
64
65 QTAILQ_FOREACH(kid, &ef->sbus.qbus.children, sibling) {
66 DeviceState *qdev = kid->child;
67 event = DO_UPCAST(SCLPEvent, qdev, qdev);
68 event_class = SCLP_EVENT_GET_CLASS(event);
69 if (event->event_pending &&
70 event_class->get_send_mask() & ef->receive_mask) {
71 return true;
72 }
73 }
74 return false;
75 }
76
77 static sccb_mask_t get_host_send_mask(SCLPEventFacility *ef)
78 {
79 sccb_mask_t mask;
80 BusChild *kid;
81 SCLPEventClass *child;
82
83 mask = 0;
84
85 QTAILQ_FOREACH(kid, &ef->sbus.qbus.children, sibling) {
86 DeviceState *qdev = kid->child;
87 child = SCLP_EVENT_GET_CLASS((SCLPEvent *) qdev);
88 mask |= child->get_send_mask();
89 }
90 return mask;
91 }
92
93 static sccb_mask_t get_host_receive_mask(SCLPEventFacility *ef)
94 {
95 sccb_mask_t mask;
96 BusChild *kid;
97 SCLPEventClass *child;
98
99 mask = 0;
100
101 QTAILQ_FOREACH(kid, &ef->sbus.qbus.children, sibling) {
102 DeviceState *qdev = kid->child;
103 child = SCLP_EVENT_GET_CLASS((SCLPEvent *) qdev);
104 mask |= child->get_receive_mask();
105 }
106 return mask;
107 }
108
109 static uint16_t write_event_length_check(SCCB *sccb)
110 {
111 int slen;
112 unsigned elen = 0;
113 EventBufferHeader *event;
114 WriteEventData *wed = (WriteEventData *) sccb;
115
116 event = (EventBufferHeader *) &wed->ebh;
117 for (slen = sccb_data_len(sccb); slen > 0; slen -= elen) {
118 elen = be16_to_cpu(event->length);
119 if (elen < sizeof(*event) || elen > slen) {
120 return SCLP_RC_EVENT_BUFFER_SYNTAX_ERROR;
121 }
122 event = (void *) event + elen;
123 }
124 if (slen) {
125 return SCLP_RC_INCONSISTENT_LENGTHS;
126 }
127 return SCLP_RC_NORMAL_COMPLETION;
128 }
129
130 static uint16_t handle_write_event_buf(SCLPEventFacility *ef,
131 EventBufferHeader *event_buf, SCCB *sccb)
132 {
133 uint16_t rc;
134 BusChild *kid;
135 SCLPEvent *event;
136 SCLPEventClass *ec;
137
138 rc = SCLP_RC_INVALID_FUNCTION;
139
140 QTAILQ_FOREACH(kid, &ef->sbus.qbus.children, sibling) {
141 DeviceState *qdev = kid->child;
142 event = (SCLPEvent *) qdev;
143 ec = SCLP_EVENT_GET_CLASS(event);
144
145 if (ec->write_event_data &&
146 ec->can_handle_event(event_buf->type)) {
147 rc = ec->write_event_data(event, event_buf);
148 break;
149 }
150 }
151 return rc;
152 }
153
154 static uint16_t handle_sccb_write_events(SCLPEventFacility *ef, SCCB *sccb)
155 {
156 uint16_t rc;
157 int slen;
158 unsigned elen = 0;
159 EventBufferHeader *event_buf;
160 WriteEventData *wed = (WriteEventData *) sccb;
161
162 event_buf = &wed->ebh;
163 rc = SCLP_RC_NORMAL_COMPLETION;
164
165 /* loop over all contained event buffers */
166 for (slen = sccb_data_len(sccb); slen > 0; slen -= elen) {
167 elen = be16_to_cpu(event_buf->length);
168
169 /* in case of a previous error mark all trailing buffers
170 * as not accepted */
171 if (rc != SCLP_RC_NORMAL_COMPLETION) {
172 event_buf->flags &= ~(SCLP_EVENT_BUFFER_ACCEPTED);
173 } else {
174 rc = handle_write_event_buf(ef, event_buf, sccb);
175 }
176 event_buf = (void *) event_buf + elen;
177 }
178 return rc;
179 }
180
181 static void write_event_data(SCLPEventFacility *ef, SCCB *sccb)
182 {
183 if (sccb->h.function_code != SCLP_FC_NORMAL_WRITE) {
184 sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_FUNCTION);
185 return;
186 }
187 if (be16_to_cpu(sccb->h.length) < 8) {
188 sccb->h.response_code = cpu_to_be16(SCLP_RC_INSUFFICIENT_SCCB_LENGTH);
189 return;
190 }
191 /* first do a sanity check of the write events */
192 sccb->h.response_code = cpu_to_be16(write_event_length_check(sccb));
193
194 /* if no early error, then execute */
195 if (sccb->h.response_code == be16_to_cpu(SCLP_RC_NORMAL_COMPLETION)) {
196 sccb->h.response_code =
197 cpu_to_be16(handle_sccb_write_events(ef, sccb));
198 }
199 }
200
/*
 * Collect pending event data from all devices whose send mask is selected
 * by @mask, packing the event buffers one after another into the SCCB.
 *
 * @slen tracks the remaining free space in the SCCB payload; it is passed
 * by reference to each device's read_event_data callback, which presumably
 * decrements it by the number of bytes it stored — confirm against the
 * individual event implementations.
 *
 * Returns SCLP_RC_NO_EVENT_BUFFERS_STORED when no device produced data.
 */
static uint16_t handle_sccb_read_events(SCLPEventFacility *ef, SCCB *sccb,
                                        sccb_mask_t mask)
{
    uint16_t rc;
    int slen;
    unsigned elen;
    BusChild *kid;
    SCLPEvent *event;
    SCLPEventClass *ec;
    EventBufferHeader *event_buf;
    ReadEventData *red = (ReadEventData *) sccb;

    event_buf = &red->ebh;
    event_buf->length = 0;
    slen = sizeof(sccb->data);

    rc = SCLP_RC_NO_EVENT_BUFFERS_STORED;

    QTAILQ_FOREACH(kid, &ef->sbus.qbus.children, sibling) {
        DeviceState *qdev = kid->child;
        event = (SCLPEvent *) qdev;
        ec = SCLP_EVENT_GET_CLASS(event);

        if (mask & ec->get_send_mask()) {
            if (ec->read_event_data(event, event_buf, &slen)) {
                /* advance past the buffer this device just stored */
                elen = be16_to_cpu(event_buf->length);
                event_buf = (EventBufferHeader *) ((char *)event_buf + elen);
                rc = SCLP_RC_NORMAL_COMPLETION;
            }
        }
    }

    if (sccb->h.control_mask[2] & SCLP_VARIABLE_LENGTH_RESPONSE) {
        /* architecture suggests to reset variable-length-response bit */
        sccb->h.control_mask[2] &= ~SCLP_VARIABLE_LENGTH_RESPONSE;
        /* with a new length value */
        sccb->h.length = cpu_to_be16(SCCB_SIZE - slen);
    }
    return rc;
}
241
/* copy up to src_len bytes and fill the rest of dst with zeroes */
static void copy_mask(uint8_t *dst, uint8_t *src, uint16_t dst_len,
                      uint16_t src_len)
{
    uint16_t ncopy = dst_len < src_len ? dst_len : src_len;
    int i;

    for (i = 0; i < ncopy; i++) {
        dst[i] = src[i];
    }
    for (; i < dst_len; i++) {
        dst[i] = 0;
    }
}
252
/*
 * SCLP Read Event Data command.  Unconditional reads use the guest's full
 * receive mask; selective reads use the selection mask supplied in the
 * SCCB, which must be a subset of the receive mask.  The response code is
 * stored big-endian into the SCCB header.
 */
static void read_event_data(SCLPEventFacility *ef, SCCB *sccb)
{
    sccb_mask_t sclp_active_selection_mask;
    sccb_mask_t sclp_cp_receive_mask;

    ReadEventData *red = (ReadEventData *) sccb;

    if (be16_to_cpu(sccb->h.length) != SCCB_SIZE) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_INSUFFICIENT_SCCB_LENGTH);
        return;
    }

    switch (sccb->h.function_code) {
    case SCLP_UNCONDITIONAL_READ:
        sccb->h.response_code = cpu_to_be16(
            handle_sccb_read_events(ef, sccb, ef->receive_mask));
        break;
    case SCLP_SELECTIVE_READ:
        /* get active selection mask */
        sclp_cp_receive_mask = ef->receive_mask;

        /* widen the guest-supplied mask (mask_length bytes) to 8 bytes */
        copy_mask((uint8_t *)&sclp_active_selection_mask, (uint8_t *)&red->mask,
                  sizeof(sclp_active_selection_mask), ef->mask_length);
        sclp_active_selection_mask = be64_to_cpu(sclp_active_selection_mask);
        /* selecting bits outside the receive mask is invalid */
        if (!sclp_cp_receive_mask ||
            (sclp_active_selection_mask & ~sclp_cp_receive_mask)) {
            sccb->h.response_code =
                cpu_to_be16(SCLP_RC_INVALID_SELECTION_MASK);
        } else {
            sccb->h.response_code = cpu_to_be16(
                handle_sccb_read_events(ef, sccb, sclp_active_selection_mask));
        }
        break;
    default:
        sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_FUNCTION);
    }
}
290
/*
 * SCLP Write Event Mask command: record the guest's receive mask and
 * report the host's receive/send capability masks back into the SCCB.
 * Mask lengths other than 4 are only accepted when allow_all_mask_sizes
 * is set (compat knob for migration to older versions).
 */
static void write_event_mask(SCLPEventFacility *ef, SCCB *sccb)
{
    WriteEventMask *we_mask = (WriteEventMask *) sccb;
    uint16_t mask_length = be16_to_cpu(we_mask->mask_length);
    sccb_mask_t tmp_mask;

    if (!mask_length || (mask_length > SCLP_EVENT_MASK_LEN_MAX) ||
        ((mask_length != 4) && !ef->allow_all_mask_sizes)) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_MASK_LENGTH);
        return;
    }

    /*
     * Note: We currently only support masks up to 8 byte length;
     * the remainder is filled up with zeroes. Older Linux
     * kernels use a 4 byte mask length, newer ones can use both
     * 8 or 4 depending on what is available on the host.
     */

    /* keep track of the guest's capability masks */
    copy_mask((uint8_t *)&tmp_mask, WEM_CP_RECEIVE_MASK(we_mask, mask_length),
              sizeof(tmp_mask), mask_length);
    ef->receive_mask = be64_to_cpu(tmp_mask);

    /* return the SCLP's capability masks to the guest */
    tmp_mask = cpu_to_be64(get_host_receive_mask(ef));
    copy_mask(WEM_RECEIVE_MASK(we_mask, mask_length), (uint8_t *)&tmp_mask,
              mask_length, sizeof(tmp_mask));
    tmp_mask = cpu_to_be64(get_host_send_mask(ef));
    copy_mask(WEM_SEND_MASK(we_mask, mask_length), (uint8_t *)&tmp_mask,
              mask_length, sizeof(tmp_mask));

    sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_COMPLETION);
    ef->mask_length = mask_length;
}
326
327 /* qemu object creation and initialization functions */
328
329 #define TYPE_SCLP_EVENTS_BUS "s390-sclp-events-bus"
330
/*
 * Realize every child device on the events bus; stop and propagate the
 * error on the first failure.
 */
static void sclp_events_bus_realize(BusState *bus, Error **errp)
{
    Error *err = NULL;
    BusChild *kid;

    /* TODO: recursive realization has to be done in common code */
    QTAILQ_FOREACH(kid, &bus->children, sibling) {
        DeviceState *dev = kid->child;

        object_property_set_bool(OBJECT(dev), true, "realized", &err);
        if (err) {
            error_propagate(errp, err);
            return;
        }
    }
}
347
348 static void sclp_events_bus_class_init(ObjectClass *klass, void *data)
349 {
350 BusClass *bc = BUS_CLASS(klass);
351
352 bc->realize = sclp_events_bus_realize;
353 }
354
/* QOM registration of the SCLP events bus type. */
static const TypeInfo sclp_events_bus_info = {
    .name = TYPE_SCLP_EVENTS_BUS,
    .parent = TYPE_BUS,
    .class_init = sclp_events_bus_class_init,
};
360
361 static void command_handler(SCLPEventFacility *ef, SCCB *sccb, uint64_t code)
362 {
363 switch (code & SCLP_CMD_CODE_MASK) {
364 case SCLP_CMD_READ_EVENT_DATA:
365 read_event_data(ef, sccb);
366 break;
367 case SCLP_CMD_WRITE_EVENT_DATA:
368 write_event_data(ef, sccb);
369 break;
370 case SCLP_CMD_WRITE_EVENT_MASK:
371 write_event_mask(ef, sccb);
372 break;
373 }
374 }
375
376 static bool vmstate_event_facility_mask64_needed(void *opaque)
377 {
378 SCLPEventFacility *ef = opaque;
379
380 return (ef->receive_mask & 0xFFFFFFFF) != 0;
381 }
382
383 static bool vmstate_event_facility_mask_length_needed(void *opaque)
384 {
385 SCLPEventFacility *ef = opaque;
386
387 return ef->allow_all_mask_sizes;
388 }
389
/* Optional subsection carrying the lower 32 bits of the receive mask. */
static const VMStateDescription vmstate_event_facility_mask64 = {
    .name = "vmstate-event-facility/mask64",
    .version_id = 0,
    .minimum_version_id = 0,
    .needed = vmstate_event_facility_mask64_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(receive_mask_pieces[RECV_MASK_LOWER], SCLPEventFacility),
        VMSTATE_END_OF_LIST()
    }
};
400
/* Optional subsection carrying the guest's chosen mask length. */
static const VMStateDescription vmstate_event_facility_mask_length = {
    .name = "vmstate-event-facility/mask_length",
    .version_id = 0,
    .minimum_version_id = 0,
    .needed = vmstate_event_facility_mask_length_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT16(mask_length, SCLPEventFacility),
        VMSTATE_END_OF_LIST()
    }
};
411
/*
 * Main migration description: only the upper 32 mask bits are in the base
 * stream (kept for compatibility); the rest travels in subsections.
 */
static const VMStateDescription vmstate_event_facility = {
    .name = "vmstate-event-facility",
    .version_id = 0,
    .minimum_version_id = 0,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(receive_mask_pieces[RECV_MASK_UPPER], SCLPEventFacility),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * []) {
        &vmstate_event_facility_mask64,
        &vmstate_event_facility_mask_length,
        NULL
    }
};
426
427 static void sclp_event_set_allow_all_mask_sizes(Object *obj, bool value,
428 Error **errp)
429 {
430 SCLPEventFacility *ef = (SCLPEventFacility *)obj;
431
432 ef->allow_all_mask_sizes = value;
433 }
434
435 static bool sclp_event_get_allow_all_mask_sizes(Object *obj, Error **errp)
436 {
437 SCLPEventFacility *ef = (SCLPEventFacility *)obj;
438
439 return ef->allow_all_mask_sizes;
440 }
441
442 static void init_event_facility(Object *obj)
443 {
444 SCLPEventFacility *event_facility = EVENT_FACILITY(obj);
445 DeviceState *sdev = DEVICE(obj);
446 Object *new;
447
448 event_facility->mask_length = 4;
449 event_facility->allow_all_mask_sizes = true;
450 object_property_add_bool(obj, "allow_all_mask_sizes",
451 sclp_event_get_allow_all_mask_sizes,
452 sclp_event_set_allow_all_mask_sizes);
453 /* Spawn a new bus for SCLP events */
454 qbus_create_inplace(&event_facility->sbus, sizeof(event_facility->sbus),
455 TYPE_SCLP_EVENTS_BUS, sdev, NULL);
456
457 new = object_new(TYPE_SCLP_QUIESCE);
458 object_property_add_child(obj, TYPE_SCLP_QUIESCE, new);
459 object_unref(new);
460 qdev_set_parent_bus(DEVICE(new), BUS(&event_facility->sbus));
461
462 new = object_new(TYPE_SCLP_CPU_HOTPLUG);
463 object_property_add_child(obj, TYPE_SCLP_CPU_HOTPLUG, new);
464 object_unref(new);
465 qdev_set_parent_bus(DEVICE(new), BUS(&event_facility->sbus));
466 /* the facility will automatically realize the devices via the bus */
467 }
468
469 static void reset_event_facility(DeviceState *dev)
470 {
471 SCLPEventFacility *sdev = EVENT_FACILITY(dev);
472
473 sdev->receive_mask = 0;
474 }
475
476 static void init_event_facility_class(ObjectClass *klass, void *data)
477 {
478 SysBusDeviceClass *sbdc = SYS_BUS_DEVICE_CLASS(klass);
479 DeviceClass *dc = DEVICE_CLASS(sbdc);
480 SCLPEventFacilityClass *k = EVENT_FACILITY_CLASS(dc);
481
482 dc->reset = reset_event_facility;
483 dc->vmsd = &vmstate_event_facility;
484 set_bit(DEVICE_CATEGORY_MISC, dc->categories);
485 k->command_handler = command_handler;
486 k->event_pending = event_pending;
487 }
488
/* QOM registration of the SCLP event facility sysbus device. */
static const TypeInfo sclp_event_facility_info = {
    .name = TYPE_SCLP_EVENT_FACILITY,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_init = init_event_facility,
    .instance_size = sizeof(SCLPEventFacility),
    .class_init = init_event_facility_class,
    .class_size = sizeof(SCLPEventFacilityClass),
};
497
498 static void event_realize(DeviceState *qdev, Error **errp)
499 {
500 SCLPEvent *event = SCLP_EVENT(qdev);
501 SCLPEventClass *child = SCLP_EVENT_GET_CLASS(event);
502
503 if (child->init) {
504 int rc = child->init(event);
505 if (rc < 0) {
506 error_setg(errp, "SCLP event initialization failed.");
507 return;
508 }
509 }
510 }
511
512 static void event_class_init(ObjectClass *klass, void *data)
513 {
514 DeviceClass *dc = DEVICE_CLASS(klass);
515
516 dc->bus_type = TYPE_SCLP_EVENTS_BUS;
517 dc->realize = event_realize;
518 }
519
/* Abstract base type for all SCLP event devices. */
static const TypeInfo sclp_event_type_info = {
    .name = TYPE_SCLP_EVENT,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(SCLPEvent),
    .class_init = event_class_init,
    .class_size = sizeof(SCLPEventClass),
    .abstract = true,
};
528
/* Register all QOM types defined in this file. */
static void register_types(void)
{
    type_register_static(&sclp_events_bus_info);
    type_register_static(&sclp_event_facility_info);
    type_register_static(&sclp_event_type_info);
}

type_init(register_types)
537
538 BusState *sclp_get_event_facility_bus(void)
539 {
540 Object *busobj;
541 SCLPEventsBus *sbus;
542
543 busobj = object_resolve_path_type("", TYPE_SCLP_EVENTS_BUS, NULL);
544 sbus = OBJECT_CHECK(SCLPEventsBus, busobj, TYPE_SCLP_EVENTS_BUS);
545 if (!sbus) {
546 return NULL;
547 }
548
549 return &sbus->qbus;
550 }