/*
 * SCLP event facility
 *    handles SCLP event types
 *    - Signal Quiesce - system power down
 *    - ASCII Console Data - VT220 read and write
 *
 * Copyright IBM, Corp. 2012
 *
 * Authors:
 *  Heinz Graalfs <graalfs@de.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or (at your
 * option) any later version. See the COPYING file in the top-level directory.
 */
18 #include "qemu/osdep.h"
19 #include "qapi/error.h"
20 #include "qemu/module.h"
22 #include "hw/s390x/sclp.h"
23 #include "migration/vmstate.h"
24 #include "hw/s390x/event-facility.h"
26 typedef struct SCLPEventsBus
{
/*
 * The guest receive mask is migrated as two 32 bit pieces for compatibility
 * with older machine versions; which piece holds the low half depends on
 * host endianness.
 */
#ifdef HOST_WORDS_BIGENDIAN
#define RECV_MASK_LOWER 1
#define RECV_MASK_UPPER 0
#else /* little endian host */
#define RECV_MASK_LOWER 0
#define RECV_MASK_UPPER 1
#endif
39 struct SCLPEventFacility
{
40 SysBusDevice parent_obj
;
42 /* guest's receive mask */
44 uint32_t receive_mask_pieces
[2];
45 sccb_mask_t receive_mask
;
48 * when false, we keep the same broken, backwards compatible behaviour as
49 * before, allowing only masks of size exactly 4; when true, we implement
50 * the architecture correctly, allowing all valid mask sizes. Needed for
51 * migration toward older versions.
53 bool allow_all_mask_sizes
;
54 /* length of the receive mask */
58 /* return true if any child has event pending set */
59 static bool event_pending(SCLPEventFacility
*ef
)
63 SCLPEventClass
*event_class
;
65 QTAILQ_FOREACH(kid
, &ef
->sbus
.qbus
.children
, sibling
) {
66 DeviceState
*qdev
= kid
->child
;
67 event
= DO_UPCAST(SCLPEvent
, qdev
, qdev
);
68 event_class
= SCLP_EVENT_GET_CLASS(event
);
69 if (event
->event_pending
&&
70 event_class
->get_send_mask() & ef
->receive_mask
) {
77 static sccb_mask_t
get_host_send_mask(SCLPEventFacility
*ef
)
81 SCLPEventClass
*child
;
85 QTAILQ_FOREACH(kid
, &ef
->sbus
.qbus
.children
, sibling
) {
86 DeviceState
*qdev
= kid
->child
;
87 child
= SCLP_EVENT_GET_CLASS((SCLPEvent
*) qdev
);
88 mask
|= child
->get_send_mask();
93 static sccb_mask_t
get_host_receive_mask(SCLPEventFacility
*ef
)
97 SCLPEventClass
*child
;
101 QTAILQ_FOREACH(kid
, &ef
->sbus
.qbus
.children
, sibling
) {
102 DeviceState
*qdev
= kid
->child
;
103 child
= SCLP_EVENT_GET_CLASS((SCLPEvent
*) qdev
);
104 mask
|= child
->get_receive_mask();
109 static uint16_t write_event_length_check(SCCB
*sccb
)
113 EventBufferHeader
*event
;
114 WriteEventData
*wed
= (WriteEventData
*) sccb
;
116 event
= (EventBufferHeader
*) &wed
->ebh
;
117 for (slen
= sccb_data_len(sccb
); slen
> 0; slen
-= elen
) {
118 elen
= be16_to_cpu(event
->length
);
119 if (elen
< sizeof(*event
) || elen
> slen
) {
120 return SCLP_RC_EVENT_BUFFER_SYNTAX_ERROR
;
122 event
= (void *) event
+ elen
;
125 return SCLP_RC_INCONSISTENT_LENGTHS
;
127 return SCLP_RC_NORMAL_COMPLETION
;
130 static uint16_t handle_write_event_buf(SCLPEventFacility
*ef
,
131 EventBufferHeader
*event_buf
, SCCB
*sccb
)
138 rc
= SCLP_RC_INVALID_FUNCTION
;
140 QTAILQ_FOREACH(kid
, &ef
->sbus
.qbus
.children
, sibling
) {
141 DeviceState
*qdev
= kid
->child
;
142 event
= (SCLPEvent
*) qdev
;
143 ec
= SCLP_EVENT_GET_CLASS(event
);
145 if (ec
->write_event_data
&&
146 ec
->can_handle_event(event_buf
->type
)) {
147 rc
= ec
->write_event_data(event
, event_buf
);
154 static uint16_t handle_sccb_write_events(SCLPEventFacility
*ef
, SCCB
*sccb
)
159 EventBufferHeader
*event_buf
;
160 WriteEventData
*wed
= (WriteEventData
*) sccb
;
162 event_buf
= &wed
->ebh
;
163 rc
= SCLP_RC_NORMAL_COMPLETION
;
165 /* loop over all contained event buffers */
166 for (slen
= sccb_data_len(sccb
); slen
> 0; slen
-= elen
) {
167 elen
= be16_to_cpu(event_buf
->length
);
169 /* in case of a previous error mark all trailing buffers
171 if (rc
!= SCLP_RC_NORMAL_COMPLETION
) {
172 event_buf
->flags
&= ~(SCLP_EVENT_BUFFER_ACCEPTED
);
174 rc
= handle_write_event_buf(ef
, event_buf
, sccb
);
176 event_buf
= (void *) event_buf
+ elen
;
181 static void write_event_data(SCLPEventFacility
*ef
, SCCB
*sccb
)
183 if (sccb
->h
.function_code
!= SCLP_FC_NORMAL_WRITE
) {
184 sccb
->h
.response_code
= cpu_to_be16(SCLP_RC_INVALID_FUNCTION
);
187 if (be16_to_cpu(sccb
->h
.length
) < 8) {
188 sccb
->h
.response_code
= cpu_to_be16(SCLP_RC_INSUFFICIENT_SCCB_LENGTH
);
191 /* first do a sanity check of the write events */
192 sccb
->h
.response_code
= cpu_to_be16(write_event_length_check(sccb
));
194 /* if no early error, then execute */
195 if (sccb
->h
.response_code
== be16_to_cpu(SCLP_RC_NORMAL_COMPLETION
)) {
196 sccb
->h
.response_code
=
197 cpu_to_be16(handle_sccb_write_events(ef
, sccb
));
201 static uint16_t handle_sccb_read_events(SCLPEventFacility
*ef
, SCCB
*sccb
,
210 EventBufferHeader
*event_buf
;
211 ReadEventData
*red
= (ReadEventData
*) sccb
;
213 event_buf
= &red
->ebh
;
214 event_buf
->length
= 0;
215 slen
= sizeof(sccb
->data
);
217 rc
= SCLP_RC_NO_EVENT_BUFFERS_STORED
;
219 QTAILQ_FOREACH(kid
, &ef
->sbus
.qbus
.children
, sibling
) {
220 DeviceState
*qdev
= kid
->child
;
221 event
= (SCLPEvent
*) qdev
;
222 ec
= SCLP_EVENT_GET_CLASS(event
);
224 if (mask
& ec
->get_send_mask()) {
225 if (ec
->read_event_data(event
, event_buf
, &slen
)) {
226 elen
= be16_to_cpu(event_buf
->length
);
227 event_buf
= (EventBufferHeader
*) ((char *)event_buf
+ elen
);
228 rc
= SCLP_RC_NORMAL_COMPLETION
;
233 if (sccb
->h
.control_mask
[2] & SCLP_VARIABLE_LENGTH_RESPONSE
) {
234 /* architecture suggests to reset variable-length-response bit */
235 sccb
->h
.control_mask
[2] &= ~SCLP_VARIABLE_LENGTH_RESPONSE
;
236 /* with a new length value */
237 sccb
->h
.length
= cpu_to_be16(SCCB_SIZE
- slen
);
/* copy up to src_len bytes and fill the rest of dst with zeroes */
static void copy_mask(uint8_t *dst, uint8_t *src, uint16_t dst_len,
                      uint16_t src_len)
{
    int i;

    for (i = 0; i < dst_len; i++) {
        dst[i] = i < src_len ? src[i] : 0;
    }
}
253 static void read_event_data(SCLPEventFacility
*ef
, SCCB
*sccb
)
255 sccb_mask_t sclp_active_selection_mask
;
256 sccb_mask_t sclp_cp_receive_mask
;
258 ReadEventData
*red
= (ReadEventData
*) sccb
;
260 if (be16_to_cpu(sccb
->h
.length
) != SCCB_SIZE
) {
261 sccb
->h
.response_code
= cpu_to_be16(SCLP_RC_INSUFFICIENT_SCCB_LENGTH
);
265 switch (sccb
->h
.function_code
) {
266 case SCLP_UNCONDITIONAL_READ
:
267 sccb
->h
.response_code
= cpu_to_be16(
268 handle_sccb_read_events(ef
, sccb
, ef
->receive_mask
));
270 case SCLP_SELECTIVE_READ
:
271 /* get active selection mask */
272 sclp_cp_receive_mask
= ef
->receive_mask
;
274 copy_mask((uint8_t *)&sclp_active_selection_mask
, (uint8_t *)&red
->mask
,
275 sizeof(sclp_active_selection_mask
), ef
->mask_length
);
276 sclp_active_selection_mask
= be64_to_cpu(sclp_active_selection_mask
);
277 if (!sclp_cp_receive_mask
||
278 (sclp_active_selection_mask
& ~sclp_cp_receive_mask
)) {
279 sccb
->h
.response_code
=
280 cpu_to_be16(SCLP_RC_INVALID_SELECTION_MASK
);
282 sccb
->h
.response_code
= cpu_to_be16(
283 handle_sccb_read_events(ef
, sccb
, sclp_active_selection_mask
));
287 sccb
->h
.response_code
= cpu_to_be16(SCLP_RC_INVALID_FUNCTION
);
291 static void write_event_mask(SCLPEventFacility
*ef
, SCCB
*sccb
)
293 WriteEventMask
*we_mask
= (WriteEventMask
*) sccb
;
294 uint16_t mask_length
= be16_to_cpu(we_mask
->mask_length
);
295 sccb_mask_t tmp_mask
;
297 if (!mask_length
|| (mask_length
> SCLP_EVENT_MASK_LEN_MAX
) ||
298 ((mask_length
!= 4) && !ef
->allow_all_mask_sizes
)) {
299 sccb
->h
.response_code
= cpu_to_be16(SCLP_RC_INVALID_MASK_LENGTH
);
304 * Note: We currently only support masks up to 8 byte length;
305 * the remainder is filled up with zeroes. Older Linux
306 * kernels use a 4 byte mask length, newer ones can use both
307 * 8 or 4 depending on what is available on the host.
310 /* keep track of the guest's capability masks */
311 copy_mask((uint8_t *)&tmp_mask
, WEM_CP_RECEIVE_MASK(we_mask
, mask_length
),
312 sizeof(tmp_mask
), mask_length
);
313 ef
->receive_mask
= be64_to_cpu(tmp_mask
);
315 /* return the SCLP's capability masks to the guest */
316 tmp_mask
= cpu_to_be64(get_host_receive_mask(ef
));
317 copy_mask(WEM_RECEIVE_MASK(we_mask
, mask_length
), (uint8_t *)&tmp_mask
,
318 mask_length
, sizeof(tmp_mask
));
319 tmp_mask
= cpu_to_be64(get_host_send_mask(ef
));
320 copy_mask(WEM_SEND_MASK(we_mask
, mask_length
), (uint8_t *)&tmp_mask
,
321 mask_length
, sizeof(tmp_mask
));
323 sccb
->h
.response_code
= cpu_to_be16(SCLP_RC_NORMAL_COMPLETION
);
324 ef
->mask_length
= mask_length
;
/* qemu object creation and initialization functions */

/* QOM type name of the internal SCLP events bus */
#define TYPE_SCLP_EVENTS_BUS "s390-sclp-events-bus"
331 static void sclp_events_bus_realize(BusState
*bus
, Error
**errp
)
336 /* TODO: recursive realization has to be done in common code */
337 QTAILQ_FOREACH(kid
, &bus
->children
, sibling
) {
338 DeviceState
*dev
= kid
->child
;
340 object_property_set_bool(OBJECT(dev
), true, "realized", &err
);
342 error_propagate(errp
, err
);
348 static void sclp_events_bus_class_init(ObjectClass
*klass
, void *data
)
350 BusClass
*bc
= BUS_CLASS(klass
);
352 bc
->realize
= sclp_events_bus_realize
;
355 static const TypeInfo sclp_events_bus_info
= {
356 .name
= TYPE_SCLP_EVENTS_BUS
,
358 .class_init
= sclp_events_bus_class_init
,
361 static void command_handler(SCLPEventFacility
*ef
, SCCB
*sccb
, uint64_t code
)
363 switch (code
& SCLP_CMD_CODE_MASK
) {
364 case SCLP_CMD_READ_EVENT_DATA
:
365 read_event_data(ef
, sccb
);
367 case SCLP_CMD_WRITE_EVENT_DATA
:
368 write_event_data(ef
, sccb
);
370 case SCLP_CMD_WRITE_EVENT_MASK
:
371 write_event_mask(ef
, sccb
);
376 static bool vmstate_event_facility_mask64_needed(void *opaque
)
378 SCLPEventFacility
*ef
= opaque
;
380 return (ef
->receive_mask
& 0xFFFFFFFF) != 0;
383 static bool vmstate_event_facility_mask_length_needed(void *opaque
)
385 SCLPEventFacility
*ef
= opaque
;
387 return ef
->allow_all_mask_sizes
;
390 static const VMStateDescription vmstate_event_facility_mask64
= {
391 .name
= "vmstate-event-facility/mask64",
393 .minimum_version_id
= 0,
394 .needed
= vmstate_event_facility_mask64_needed
,
395 .fields
= (VMStateField
[]) {
396 VMSTATE_UINT32(receive_mask_pieces
[RECV_MASK_LOWER
], SCLPEventFacility
),
397 VMSTATE_END_OF_LIST()
401 static const VMStateDescription vmstate_event_facility_mask_length
= {
402 .name
= "vmstate-event-facility/mask_length",
404 .minimum_version_id
= 0,
405 .needed
= vmstate_event_facility_mask_length_needed
,
406 .fields
= (VMStateField
[]) {
407 VMSTATE_UINT16(mask_length
, SCLPEventFacility
),
408 VMSTATE_END_OF_LIST()
412 static const VMStateDescription vmstate_event_facility
= {
413 .name
= "vmstate-event-facility",
415 .minimum_version_id
= 0,
416 .fields
= (VMStateField
[]) {
417 VMSTATE_UINT32(receive_mask_pieces
[RECV_MASK_UPPER
], SCLPEventFacility
),
418 VMSTATE_END_OF_LIST()
420 .subsections
= (const VMStateDescription
* []) {
421 &vmstate_event_facility_mask64
,
422 &vmstate_event_facility_mask_length
,
427 static void sclp_event_set_allow_all_mask_sizes(Object
*obj
, bool value
,
430 SCLPEventFacility
*ef
= (SCLPEventFacility
*)obj
;
432 ef
->allow_all_mask_sizes
= value
;
435 static bool sclp_event_get_allow_all_mask_sizes(Object
*obj
, Error
**errp
)
437 SCLPEventFacility
*ef
= (SCLPEventFacility
*)obj
;
439 return ef
->allow_all_mask_sizes
;
442 static void init_event_facility(Object
*obj
)
444 SCLPEventFacility
*event_facility
= EVENT_FACILITY(obj
);
445 DeviceState
*sdev
= DEVICE(obj
);
448 event_facility
->mask_length
= 4;
449 event_facility
->allow_all_mask_sizes
= true;
450 object_property_add_bool(obj
, "allow_all_mask_sizes",
451 sclp_event_get_allow_all_mask_sizes
,
452 sclp_event_set_allow_all_mask_sizes
);
453 /* Spawn a new bus for SCLP events */
454 qbus_create_inplace(&event_facility
->sbus
, sizeof(event_facility
->sbus
),
455 TYPE_SCLP_EVENTS_BUS
, sdev
, NULL
);
457 new = object_new(TYPE_SCLP_QUIESCE
);
458 object_property_add_child(obj
, TYPE_SCLP_QUIESCE
, new);
460 qdev_set_parent_bus(DEVICE(new), BUS(&event_facility
->sbus
));
462 new = object_new(TYPE_SCLP_CPU_HOTPLUG
);
463 object_property_add_child(obj
, TYPE_SCLP_CPU_HOTPLUG
, new);
465 qdev_set_parent_bus(DEVICE(new), BUS(&event_facility
->sbus
));
466 /* the facility will automatically realize the devices via the bus */
469 static void reset_event_facility(DeviceState
*dev
)
471 SCLPEventFacility
*sdev
= EVENT_FACILITY(dev
);
473 sdev
->receive_mask
= 0;
476 static void init_event_facility_class(ObjectClass
*klass
, void *data
)
478 SysBusDeviceClass
*sbdc
= SYS_BUS_DEVICE_CLASS(klass
);
479 DeviceClass
*dc
= DEVICE_CLASS(sbdc
);
480 SCLPEventFacilityClass
*k
= EVENT_FACILITY_CLASS(dc
);
482 dc
->reset
= reset_event_facility
;
483 dc
->vmsd
= &vmstate_event_facility
;
484 set_bit(DEVICE_CATEGORY_MISC
, dc
->categories
);
485 k
->command_handler
= command_handler
;
486 k
->event_pending
= event_pending
;
489 static const TypeInfo sclp_event_facility_info
= {
490 .name
= TYPE_SCLP_EVENT_FACILITY
,
491 .parent
= TYPE_SYS_BUS_DEVICE
,
492 .instance_init
= init_event_facility
,
493 .instance_size
= sizeof(SCLPEventFacility
),
494 .class_init
= init_event_facility_class
,
495 .class_size
= sizeof(SCLPEventFacilityClass
),
498 static void event_realize(DeviceState
*qdev
, Error
**errp
)
500 SCLPEvent
*event
= SCLP_EVENT(qdev
);
501 SCLPEventClass
*child
= SCLP_EVENT_GET_CLASS(event
);
504 int rc
= child
->init(event
);
506 error_setg(errp
, "SCLP event initialization failed.");
512 static void event_class_init(ObjectClass
*klass
, void *data
)
514 DeviceClass
*dc
= DEVICE_CLASS(klass
);
516 dc
->bus_type
= TYPE_SCLP_EVENTS_BUS
;
517 dc
->realize
= event_realize
;
520 static const TypeInfo sclp_event_type_info
= {
521 .name
= TYPE_SCLP_EVENT
,
522 .parent
= TYPE_DEVICE
,
523 .instance_size
= sizeof(SCLPEvent
),
524 .class_init
= event_class_init
,
525 .class_size
= sizeof(SCLPEventClass
),
529 static void register_types(void)
531 type_register_static(&sclp_events_bus_info
);
532 type_register_static(&sclp_event_facility_info
);
533 type_register_static(&sclp_event_type_info
);
536 type_init(register_types
)
538 BusState
*sclp_get_event_facility_bus(void)
543 busobj
= object_resolve_path_type("", TYPE_SCLP_EVENTS_BUS
, NULL
);
544 sbus
= OBJECT_CHECK(SCLPEventsBus
, busobj
, TYPE_SCLP_EVENTS_BUS
);