/*
 * virtio ccw target implementation
 *
 * Copyright 2012,2015 IBM Corp.
 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
 *            Pierre Morel <pmorel@linux.vnet.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or (at
 * your option) any later version. See the COPYING file in the top-level
 * directory.
 */
13 #include "qemu/osdep.h"
14 #include "qapi/error.h"
15 #include "sysemu/kvm.h"
17 #include "hw/virtio/virtio.h"
18 #include "migration/qemu-file-types.h"
19 #include "hw/virtio/virtio-net.h"
20 #include "hw/sysbus.h"
21 #include "qemu/bitops.h"
22 #include "qemu/error-report.h"
23 #include "qemu/module.h"
24 #include "hw/virtio/virtio-access.h"
25 #include "hw/virtio/virtio-bus.h"
26 #include "hw/s390x/adapter.h"
27 #include "hw/s390x/s390_flic.h"
29 #include "hw/s390x/ioinst.h"
30 #include "hw/s390x/css.h"
31 #include "virtio-ccw.h"
33 #include "hw/s390x/css-bridge.h"
34 #include "hw/s390x/s390-virtio-ccw.h"
36 #define NR_CLASSIC_INDICATOR_BITS 64
38 static int virtio_ccw_dev_post_load(void *opaque
, int version_id
)
40 VirtioCcwDevice
*dev
= VIRTIO_CCW_DEVICE(opaque
);
41 CcwDevice
*ccw_dev
= CCW_DEVICE(dev
);
42 CCWDeviceClass
*ck
= CCW_DEVICE_GET_CLASS(ccw_dev
);
44 ccw_dev
->sch
->driver_data
= dev
;
45 if (ccw_dev
->sch
->thinint_active
) {
46 dev
->routes
.adapter
.adapter_id
= css_get_adapter_id(
47 CSS_IO_ADAPTER_VIRTIO
,
50 /* Re-fill subch_id after loading the subchannel states.*/
52 ck
->refill_ids(ccw_dev
);
57 typedef struct VirtioCcwDeviceTmp
{
58 VirtioCcwDevice
*parent
;
59 uint16_t config_vector
;
62 static int virtio_ccw_dev_tmp_pre_save(void *opaque
)
64 VirtioCcwDeviceTmp
*tmp
= opaque
;
65 VirtioCcwDevice
*dev
= tmp
->parent
;
66 VirtIODevice
*vdev
= virtio_bus_get_device(&dev
->bus
);
68 tmp
->config_vector
= vdev
->config_vector
;
73 static int virtio_ccw_dev_tmp_post_load(void *opaque
, int version_id
)
75 VirtioCcwDeviceTmp
*tmp
= opaque
;
76 VirtioCcwDevice
*dev
= tmp
->parent
;
77 VirtIODevice
*vdev
= virtio_bus_get_device(&dev
->bus
);
79 vdev
->config_vector
= tmp
->config_vector
;
83 const VMStateDescription vmstate_virtio_ccw_dev_tmp
= {
84 .name
= "s390_virtio_ccw_dev_tmp",
85 .pre_save
= virtio_ccw_dev_tmp_pre_save
,
86 .post_load
= virtio_ccw_dev_tmp_post_load
,
87 .fields
= (VMStateField
[]) {
88 VMSTATE_UINT16(config_vector
, VirtioCcwDeviceTmp
),
93 const VMStateDescription vmstate_virtio_ccw_dev
= {
94 .name
= "s390_virtio_ccw_dev",
96 .minimum_version_id
= 1,
97 .post_load
= virtio_ccw_dev_post_load
,
98 .fields
= (VMStateField
[]) {
99 VMSTATE_CCW_DEVICE(parent_obj
, VirtioCcwDevice
),
100 VMSTATE_PTR_TO_IND_ADDR(indicators
, VirtioCcwDevice
),
101 VMSTATE_PTR_TO_IND_ADDR(indicators2
, VirtioCcwDevice
),
102 VMSTATE_PTR_TO_IND_ADDR(summary_indicator
, VirtioCcwDevice
),
104 * Ugly hack because VirtIODevice does not migrate itself.
105 * This also makes legacy via vmstate_save_state possible.
107 VMSTATE_WITH_TMP(VirtioCcwDevice
, VirtioCcwDeviceTmp
,
108 vmstate_virtio_ccw_dev_tmp
),
109 VMSTATE_STRUCT(routes
, VirtioCcwDevice
, 1, vmstate_adapter_routes
,
111 VMSTATE_UINT8(thinint_isc
, VirtioCcwDevice
),
112 VMSTATE_INT32(revision
, VirtioCcwDevice
),
113 VMSTATE_END_OF_LIST()
117 static void virtio_ccw_bus_new(VirtioBusState
*bus
, size_t bus_size
,
118 VirtioCcwDevice
*dev
);
120 VirtIODevice
*virtio_ccw_get_vdev(SubchDev
*sch
)
122 VirtIODevice
*vdev
= NULL
;
123 VirtioCcwDevice
*dev
= sch
->driver_data
;
126 vdev
= virtio_bus_get_device(&dev
->bus
);
131 static void virtio_ccw_start_ioeventfd(VirtioCcwDevice
*dev
)
133 virtio_bus_start_ioeventfd(&dev
->bus
);
136 static void virtio_ccw_stop_ioeventfd(VirtioCcwDevice
*dev
)
138 virtio_bus_stop_ioeventfd(&dev
->bus
);
141 static bool virtio_ccw_ioeventfd_enabled(DeviceState
*d
)
143 VirtioCcwDevice
*dev
= VIRTIO_CCW_DEVICE(d
);
145 return (dev
->flags
& VIRTIO_CCW_FLAG_USE_IOEVENTFD
) != 0;
148 static int virtio_ccw_ioeventfd_assign(DeviceState
*d
, EventNotifier
*notifier
,
151 VirtioCcwDevice
*dev
= VIRTIO_CCW_DEVICE(d
);
152 CcwDevice
*ccw_dev
= CCW_DEVICE(dev
);
153 SubchDev
*sch
= ccw_dev
->sch
;
154 uint32_t sch_id
= (css_build_subchannel_id(sch
) << 16) | sch
->schid
;
156 return s390_assign_subch_ioeventfd(notifier
, sch_id
, n
, assign
);
159 /* Communication blocks used by several channel commands. */
160 typedef struct VqInfoBlockLegacy
{
165 } QEMU_PACKED VqInfoBlockLegacy
;
167 typedef struct VqInfoBlock
{
174 } QEMU_PACKED VqInfoBlock
;
176 typedef struct VqConfigBlock
{
179 } QEMU_PACKED VqConfigBlock
;
181 typedef struct VirtioFeatDesc
{
184 } QEMU_PACKED VirtioFeatDesc
;
186 typedef struct VirtioThinintInfo
{
187 hwaddr summary_indicator
;
188 hwaddr device_indicator
;
191 } QEMU_PACKED VirtioThinintInfo
;
193 typedef struct VirtioRevInfo
{
197 } QEMU_PACKED VirtioRevInfo
;
199 /* Specify where the virtqueues for the subchannel are in guest memory. */
200 static int virtio_ccw_set_vqs(SubchDev
*sch
, VqInfoBlock
*info
,
201 VqInfoBlockLegacy
*linfo
)
203 VirtIODevice
*vdev
= virtio_ccw_get_vdev(sch
);
204 uint16_t index
= info
? info
->index
: linfo
->index
;
205 uint16_t num
= info
? info
->num
: linfo
->num
;
206 uint64_t desc
= info
? info
->desc
: linfo
->queue
;
208 if (index
>= VIRTIO_QUEUE_MAX
) {
212 /* Current code in virtio.c relies on 4K alignment. */
213 if (linfo
&& desc
&& (linfo
->align
!= 4096)) {
222 virtio_queue_set_rings(vdev
, index
, desc
, info
->avail
, info
->used
);
224 virtio_queue_set_addr(vdev
, index
, desc
);
227 virtio_queue_set_vector(vdev
, index
, VIRTIO_NO_VECTOR
);
230 /* virtio-1 allows changing the ring size. */
231 if (virtio_queue_get_max_num(vdev
, index
) < num
) {
232 /* Fail if we exceed the maximum number. */
235 virtio_queue_set_num(vdev
, index
, num
);
236 } else if (virtio_queue_get_num(vdev
, index
) > num
) {
237 /* Fail if we don't have a big enough queue. */
240 /* We ignore possible increased num for legacy for compatibility. */
241 virtio_queue_set_vector(vdev
, index
, index
);
243 /* tell notify handler in case of config change */
244 vdev
->config_vector
= VIRTIO_QUEUE_MAX
;
248 static void virtio_ccw_reset_virtio(VirtioCcwDevice
*dev
, VirtIODevice
*vdev
)
250 CcwDevice
*ccw_dev
= CCW_DEVICE(dev
);
252 virtio_ccw_stop_ioeventfd(dev
);
254 if (dev
->indicators
) {
255 release_indicator(&dev
->routes
.adapter
, dev
->indicators
);
256 dev
->indicators
= NULL
;
258 if (dev
->indicators2
) {
259 release_indicator(&dev
->routes
.adapter
, dev
->indicators2
);
260 dev
->indicators2
= NULL
;
262 if (dev
->summary_indicator
) {
263 release_indicator(&dev
->routes
.adapter
, dev
->summary_indicator
);
264 dev
->summary_indicator
= NULL
;
266 ccw_dev
->sch
->thinint_active
= false;
269 static int virtio_ccw_handle_set_vq(SubchDev
*sch
, CCW1 ccw
, bool check_len
,
274 VqInfoBlockLegacy linfo
;
275 size_t info_len
= is_legacy
? sizeof(linfo
) : sizeof(info
);
278 if (ccw
.count
!= info_len
) {
281 } else if (ccw
.count
< info_len
) {
282 /* Can't execute command. */
289 ccw_dstream_read(&sch
->cds
, linfo
);
290 linfo
.queue
= be64_to_cpu(linfo
.queue
);
291 linfo
.align
= be32_to_cpu(linfo
.align
);
292 linfo
.index
= be16_to_cpu(linfo
.index
);
293 linfo
.num
= be16_to_cpu(linfo
.num
);
294 ret
= virtio_ccw_set_vqs(sch
, NULL
, &linfo
);
296 ccw_dstream_read(&sch
->cds
, info
);
297 info
.desc
= be64_to_cpu(info
.desc
);
298 info
.index
= be16_to_cpu(info
.index
);
299 info
.num
= be16_to_cpu(info
.num
);
300 info
.avail
= be64_to_cpu(info
.avail
);
301 info
.used
= be64_to_cpu(info
.used
);
302 ret
= virtio_ccw_set_vqs(sch
, &info
, NULL
);
304 sch
->curr_status
.scsw
.count
= 0;
308 static int virtio_ccw_cb(SubchDev
*sch
, CCW1 ccw
)
311 VirtioRevInfo revinfo
;
313 VirtioFeatDesc features
;
315 VqConfigBlock vq_config
;
316 VirtioCcwDevice
*dev
= sch
->driver_data
;
317 VirtIODevice
*vdev
= virtio_ccw_get_vdev(sch
);
320 VirtioThinintInfo thinint
;
326 trace_virtio_ccw_interpret_ccw(sch
->cssid
, sch
->ssid
, sch
->schid
,
328 check_len
= !((ccw
.flags
& CCW_FLAG_SLI
) && !(ccw
.flags
& CCW_FLAG_DC
));
330 if (dev
->force_revision_1
&& dev
->revision
< 0 &&
331 ccw
.cmd_code
!= CCW_CMD_SET_VIRTIO_REV
) {
333 * virtio-1 drivers must start with negotiating to a revision >= 1,
334 * so post a command reject for all other commands
339 /* Look at the command. */
340 switch (ccw
.cmd_code
) {
342 ret
= virtio_ccw_handle_set_vq(sch
, ccw
, check_len
, dev
->revision
< 1);
344 case CCW_CMD_VDEV_RESET
:
345 virtio_ccw_reset_virtio(dev
, vdev
);
348 case CCW_CMD_READ_FEAT
:
350 if (ccw
.count
!= sizeof(features
)) {
354 } else if (ccw
.count
< sizeof(features
)) {
355 /* Can't execute command. */
362 VirtioDeviceClass
*vdc
= VIRTIO_DEVICE_GET_CLASS(vdev
);
364 ccw_dstream_advance(&sch
->cds
, sizeof(features
.features
));
365 ccw_dstream_read(&sch
->cds
, features
.index
);
366 if (features
.index
== 0) {
367 if (dev
->revision
>= 1) {
368 /* Don't offer legacy features for modern devices. */
369 features
.features
= (uint32_t)
370 (vdev
->host_features
& ~vdc
->legacy_features
);
372 features
.features
= (uint32_t)vdev
->host_features
;
374 } else if ((features
.index
== 1) && (dev
->revision
>= 1)) {
376 * Only offer feature bits beyond 31 if the guest has
377 * negotiated at least revision 1.
379 features
.features
= (uint32_t)(vdev
->host_features
>> 32);
381 /* Return zeroes if the guest supports more feature bits. */
382 features
.features
= 0;
384 ccw_dstream_rewind(&sch
->cds
);
385 features
.features
= cpu_to_le32(features
.features
);
386 ccw_dstream_write(&sch
->cds
, features
.features
);
387 sch
->curr_status
.scsw
.count
= ccw
.count
- sizeof(features
);
391 case CCW_CMD_WRITE_FEAT
:
393 if (ccw
.count
!= sizeof(features
)) {
397 } else if (ccw
.count
< sizeof(features
)) {
398 /* Can't execute command. */
405 ccw_dstream_read(&sch
->cds
, features
);
406 features
.features
= le32_to_cpu(features
.features
);
407 if (features
.index
== 0) {
408 virtio_set_features(vdev
,
409 (vdev
->guest_features
& 0xffffffff00000000ULL
) |
411 } else if ((features
.index
== 1) && (dev
->revision
>= 1)) {
413 * If the guest did not negotiate at least revision 1,
414 * we did not offer it any feature bits beyond 31. Such a
415 * guest passing us any bit here is therefore buggy.
417 virtio_set_features(vdev
,
418 (vdev
->guest_features
& 0x00000000ffffffffULL
) |
419 ((uint64_t)features
.features
<< 32));
422 * If the guest supports more feature bits, assert that it
423 * passes us zeroes for those we don't support.
425 if (features
.features
) {
426 qemu_log_mask(LOG_GUEST_ERROR
,
427 "Guest bug: features[%i]=%x (expected 0)",
428 features
.index
, features
.features
);
429 /* XXX: do a unit check here? */
432 sch
->curr_status
.scsw
.count
= ccw
.count
- sizeof(features
);
436 case CCW_CMD_READ_CONF
:
438 if (ccw
.count
> vdev
->config_len
) {
443 len
= MIN(ccw
.count
, vdev
->config_len
);
447 virtio_bus_get_vdev_config(&dev
->bus
, vdev
->config
);
448 ccw_dstream_write_buf(&sch
->cds
, vdev
->config
, len
);
449 sch
->curr_status
.scsw
.count
= ccw
.count
- len
;
453 case CCW_CMD_WRITE_CONF
:
455 if (ccw
.count
> vdev
->config_len
) {
460 len
= MIN(ccw
.count
, vdev
->config_len
);
464 ret
= ccw_dstream_read_buf(&sch
->cds
, vdev
->config
, len
);
466 virtio_bus_set_vdev_config(&dev
->bus
, vdev
->config
);
467 sch
->curr_status
.scsw
.count
= ccw
.count
- len
;
471 case CCW_CMD_READ_STATUS
:
473 if (ccw
.count
!= sizeof(status
)) {
477 } else if (ccw
.count
< sizeof(status
)) {
478 /* Can't execute command. */
485 address_space_stb(&address_space_memory
, ccw
.cda
, vdev
->status
,
486 MEMTXATTRS_UNSPECIFIED
, NULL
);
487 sch
->curr_status
.scsw
.count
= ccw
.count
- sizeof(vdev
->status
);
491 case CCW_CMD_WRITE_STATUS
:
493 if (ccw
.count
!= sizeof(status
)) {
497 } else if (ccw
.count
< sizeof(status
)) {
498 /* Can't execute command. */
505 ccw_dstream_read(&sch
->cds
, status
);
506 if (!(status
& VIRTIO_CONFIG_S_DRIVER_OK
)) {
507 virtio_ccw_stop_ioeventfd(dev
);
509 if (virtio_set_status(vdev
, status
) == 0) {
510 if (vdev
->status
== 0) {
511 virtio_ccw_reset_virtio(dev
, vdev
);
513 if (status
& VIRTIO_CONFIG_S_DRIVER_OK
) {
514 virtio_ccw_start_ioeventfd(dev
);
516 sch
->curr_status
.scsw
.count
= ccw
.count
- sizeof(status
);
519 /* Trigger a command reject. */
524 case CCW_CMD_SET_IND
:
526 if (ccw
.count
!= sizeof(indicators
)) {
530 } else if (ccw
.count
< sizeof(indicators
)) {
531 /* Can't execute command. */
535 if (sch
->thinint_active
) {
536 /* Trigger a command reject. */
540 if (virtio_get_num_queues(vdev
) > NR_CLASSIC_INDICATOR_BITS
) {
541 /* More queues than indicator bits --> trigger a reject */
548 ccw_dstream_read(&sch
->cds
, indicators
);
549 indicators
= be64_to_cpu(indicators
);
550 dev
->indicators
= get_indicator(indicators
, sizeof(uint64_t));
551 sch
->curr_status
.scsw
.count
= ccw
.count
- sizeof(indicators
);
555 case CCW_CMD_SET_CONF_IND
:
557 if (ccw
.count
!= sizeof(indicators
)) {
561 } else if (ccw
.count
< sizeof(indicators
)) {
562 /* Can't execute command. */
569 ccw_dstream_read(&sch
->cds
, indicators
);
570 indicators
= be64_to_cpu(indicators
);
571 dev
->indicators2
= get_indicator(indicators
, sizeof(uint64_t));
572 sch
->curr_status
.scsw
.count
= ccw
.count
- sizeof(indicators
);
576 case CCW_CMD_READ_VQ_CONF
:
578 if (ccw
.count
!= sizeof(vq_config
)) {
582 } else if (ccw
.count
< sizeof(vq_config
)) {
583 /* Can't execute command. */
590 ccw_dstream_read(&sch
->cds
, vq_config
.index
);
591 vq_config
.index
= be16_to_cpu(vq_config
.index
);
592 if (vq_config
.index
>= VIRTIO_QUEUE_MAX
) {
596 vq_config
.num_max
= virtio_queue_get_num(vdev
,
598 vq_config
.num_max
= cpu_to_be16(vq_config
.num_max
);
599 ccw_dstream_write(&sch
->cds
, vq_config
.num_max
);
600 sch
->curr_status
.scsw
.count
= ccw
.count
- sizeof(vq_config
);
604 case CCW_CMD_SET_IND_ADAPTER
:
606 if (ccw
.count
!= sizeof(thinint
)) {
610 } else if (ccw
.count
< sizeof(thinint
)) {
611 /* Can't execute command. */
617 } else if (dev
->indicators
&& !sch
->thinint_active
) {
618 /* Trigger a command reject. */
621 if (ccw_dstream_read(&sch
->cds
, thinint
)) {
624 thinint
.ind_bit
= be64_to_cpu(thinint
.ind_bit
);
625 thinint
.summary_indicator
=
626 be64_to_cpu(thinint
.summary_indicator
);
627 thinint
.device_indicator
=
628 be64_to_cpu(thinint
.device_indicator
);
630 dev
->summary_indicator
=
631 get_indicator(thinint
.summary_indicator
, sizeof(uint8_t));
633 get_indicator(thinint
.device_indicator
,
634 thinint
.ind_bit
/ 8 + 1);
635 dev
->thinint_isc
= thinint
.isc
;
636 dev
->routes
.adapter
.ind_offset
= thinint
.ind_bit
;
637 dev
->routes
.adapter
.summary_offset
= 7;
638 dev
->routes
.adapter
.adapter_id
= css_get_adapter_id(
639 CSS_IO_ADAPTER_VIRTIO
,
641 sch
->thinint_active
= ((dev
->indicators
!= NULL
) &&
642 (dev
->summary_indicator
!= NULL
));
643 sch
->curr_status
.scsw
.count
= ccw
.count
- sizeof(thinint
);
648 case CCW_CMD_SET_VIRTIO_REV
:
649 len
= sizeof(revinfo
);
650 if (ccw
.count
< len
) {
658 ccw_dstream_read_buf(&sch
->cds
, &revinfo
, 4);
659 revinfo
.revision
= be16_to_cpu(revinfo
.revision
);
660 revinfo
.length
= be16_to_cpu(revinfo
.length
);
661 if (ccw
.count
< len
+ revinfo
.length
||
662 (check_len
&& ccw
.count
> len
+ revinfo
.length
)) {
667 * Once we start to support revisions with additional data, we'll
668 * need to fetch it here. Nothing to do for now, though.
670 if (dev
->revision
>= 0 ||
671 revinfo
.revision
> virtio_ccw_rev_max(dev
) ||
672 (dev
->force_revision_1
&& !revinfo
.revision
)) {
677 dev
->revision
= revinfo
.revision
;
686 static void virtio_sch_disable_cb(SubchDev
*sch
)
688 VirtioCcwDevice
*dev
= sch
->driver_data
;
693 static void virtio_ccw_device_realize(VirtioCcwDevice
*dev
, Error
**errp
)
695 VirtIOCCWDeviceClass
*k
= VIRTIO_CCW_DEVICE_GET_CLASS(dev
);
696 CcwDevice
*ccw_dev
= CCW_DEVICE(dev
);
697 CCWDeviceClass
*ck
= CCW_DEVICE_GET_CLASS(ccw_dev
);
702 sch
= css_create_sch(ccw_dev
->devno
, errp
);
706 if (!virtio_ccw_rev_max(dev
) && dev
->force_revision_1
) {
707 error_setg(&err
, "Invalid value of property max_rev "
708 "(is %d expected >= 1)", virtio_ccw_rev_max(dev
));
712 sch
->driver_data
= dev
;
713 sch
->ccw_cb
= virtio_ccw_cb
;
714 sch
->disable_cb
= virtio_sch_disable_cb
;
715 sch
->id
.reserved
= 0xff;
716 sch
->id
.cu_type
= VIRTIO_CCW_CU_TYPE
;
717 sch
->do_subchannel_work
= do_subchannel_work_virtual
;
719 dev
->indicators
= NULL
;
721 for (i
= 0; i
< ADAPTER_ROUTES_MAX_GSI
; i
++) {
722 dev
->routes
.gsi
[i
] = -1;
724 css_sch_build_virtual_schib(sch
, 0, VIRTIO_CCW_CHPID_TYPE
);
726 trace_virtio_ccw_new_device(
727 sch
->cssid
, sch
->ssid
, sch
->schid
, sch
->devno
,
728 ccw_dev
->devno
.valid
? "user-configured" : "auto-configured");
730 if (kvm_enabled() && !kvm_eventfds_enabled()) {
731 dev
->flags
&= ~VIRTIO_CCW_FLAG_USE_IOEVENTFD
;
735 k
->realize(dev
, &err
);
741 ck
->realize(ccw_dev
, &err
);
749 error_propagate(errp
, err
);
750 css_subch_assign(sch
->cssid
, sch
->ssid
, sch
->schid
, sch
->devno
, NULL
);
755 static void virtio_ccw_device_unrealize(VirtioCcwDevice
*dev
, Error
**errp
)
757 VirtIOCCWDeviceClass
*dc
= VIRTIO_CCW_DEVICE_GET_CLASS(dev
);
758 CcwDevice
*ccw_dev
= CCW_DEVICE(dev
);
759 SubchDev
*sch
= ccw_dev
->sch
;
762 dc
->unrealize(dev
, errp
);
766 css_subch_assign(sch
->cssid
, sch
->ssid
, sch
->schid
, sch
->devno
, NULL
);
770 if (dev
->indicators
) {
771 release_indicator(&dev
->routes
.adapter
, dev
->indicators
);
772 dev
->indicators
= NULL
;
776 /* DeviceState to VirtioCcwDevice. Note: used on datapath,
777 * be careful and test performance if you change this.
779 static inline VirtioCcwDevice
*to_virtio_ccw_dev_fast(DeviceState
*d
)
781 CcwDevice
*ccw_dev
= to_ccw_dev_fast(d
);
783 return container_of(ccw_dev
, VirtioCcwDevice
, parent_obj
);
786 static uint8_t virtio_set_ind_atomic(SubchDev
*sch
, uint64_t ind_loc
,
789 uint8_t ind_old
, ind_new
;
793 ind_addr
= cpu_physical_memory_map(ind_loc
, &len
, true);
795 error_report("%s(%x.%x.%04x): unable to access indicator",
796 __func__
, sch
->cssid
, sch
->ssid
, sch
->schid
);
801 ind_new
= ind_old
| to_be_set
;
802 } while (atomic_cmpxchg(ind_addr
, ind_old
, ind_new
) != ind_old
);
803 trace_virtio_ccw_set_ind(ind_loc
, ind_old
, ind_new
);
804 cpu_physical_memory_unmap(ind_addr
, len
, 1, len
);
809 static void virtio_ccw_notify(DeviceState
*d
, uint16_t vector
)
811 VirtioCcwDevice
*dev
= to_virtio_ccw_dev_fast(d
);
812 CcwDevice
*ccw_dev
= to_ccw_dev_fast(d
);
813 SubchDev
*sch
= ccw_dev
->sch
;
816 if (vector
== VIRTIO_NO_VECTOR
) {
820 * vector < VIRTIO_QUEUE_MAX: notification for a virtqueue
821 * vector == VIRTIO_QUEUE_MAX: configuration change notification
822 * bits beyond that are unused and should never be notified for
824 assert(vector
<= VIRTIO_QUEUE_MAX
);
826 if (vector
< VIRTIO_QUEUE_MAX
) {
827 if (!dev
->indicators
) {
830 if (sch
->thinint_active
) {
832 * In the adapter interrupt case, indicators points to a
833 * memory area that may be (way) larger than 64 bit and
834 * ind_bit indicates the start of the indicators in a big
837 uint64_t ind_bit
= dev
->routes
.adapter
.ind_offset
;
839 virtio_set_ind_atomic(sch
, dev
->indicators
->addr
+
840 (ind_bit
+ vector
) / 8,
841 0x80 >> ((ind_bit
+ vector
) % 8));
842 if (!virtio_set_ind_atomic(sch
, dev
->summary_indicator
->addr
,
844 css_adapter_interrupt(CSS_IO_ADAPTER_VIRTIO
, dev
->thinint_isc
);
847 assert(vector
< NR_CLASSIC_INDICATOR_BITS
);
848 indicators
= address_space_ldq(&address_space_memory
,
849 dev
->indicators
->addr
,
850 MEMTXATTRS_UNSPECIFIED
,
852 indicators
|= 1ULL << vector
;
853 address_space_stq(&address_space_memory
, dev
->indicators
->addr
,
854 indicators
, MEMTXATTRS_UNSPECIFIED
, NULL
);
855 css_conditional_io_interrupt(sch
);
858 if (!dev
->indicators2
) {
861 indicators
= address_space_ldq(&address_space_memory
,
862 dev
->indicators2
->addr
,
863 MEMTXATTRS_UNSPECIFIED
,
866 address_space_stq(&address_space_memory
, dev
->indicators2
->addr
,
867 indicators
, MEMTXATTRS_UNSPECIFIED
, NULL
);
868 css_conditional_io_interrupt(sch
);
872 static void virtio_ccw_reset(DeviceState
*d
)
874 VirtioCcwDevice
*dev
= VIRTIO_CCW_DEVICE(d
);
875 VirtIODevice
*vdev
= virtio_bus_get_device(&dev
->bus
);
876 VirtIOCCWDeviceClass
*vdc
= VIRTIO_CCW_DEVICE_GET_CLASS(dev
);
878 virtio_ccw_reset_virtio(dev
, vdev
);
879 if (vdc
->parent_reset
) {
880 vdc
->parent_reset(d
);
884 static void virtio_ccw_vmstate_change(DeviceState
*d
, bool running
)
886 VirtioCcwDevice
*dev
= VIRTIO_CCW_DEVICE(d
);
889 virtio_ccw_start_ioeventfd(dev
);
891 virtio_ccw_stop_ioeventfd(dev
);
895 static bool virtio_ccw_query_guest_notifiers(DeviceState
*d
)
897 CcwDevice
*dev
= CCW_DEVICE(d
);
899 return !!(dev
->sch
->curr_status
.pmcw
.flags
& PMCW_FLAGS_MASK_ENA
);
902 static int virtio_ccw_get_mappings(VirtioCcwDevice
*dev
)
905 CcwDevice
*ccw_dev
= CCW_DEVICE(dev
);
907 if (!ccw_dev
->sch
->thinint_active
) {
911 r
= map_indicator(&dev
->routes
.adapter
, dev
->summary_indicator
);
915 r
= map_indicator(&dev
->routes
.adapter
, dev
->indicators
);
919 dev
->routes
.adapter
.summary_addr
= dev
->summary_indicator
->map
;
920 dev
->routes
.adapter
.ind_addr
= dev
->indicators
->map
;
925 static int virtio_ccw_setup_irqroutes(VirtioCcwDevice
*dev
, int nvqs
)
928 VirtIODevice
*vdev
= virtio_bus_get_device(&dev
->bus
);
930 S390FLICState
*fs
= s390_get_flic();
931 S390FLICStateClass
*fsc
= s390_get_flic_class(fs
);
933 ret
= virtio_ccw_get_mappings(dev
);
937 for (i
= 0; i
< nvqs
; i
++) {
938 if (!virtio_queue_get_num(vdev
, i
)) {
942 dev
->routes
.num_routes
= i
;
943 return fsc
->add_adapter_routes(fs
, &dev
->routes
);
946 static void virtio_ccw_release_irqroutes(VirtioCcwDevice
*dev
, int nvqs
)
948 S390FLICState
*fs
= s390_get_flic();
949 S390FLICStateClass
*fsc
= s390_get_flic_class(fs
);
951 fsc
->release_adapter_routes(fs
, &dev
->routes
);
954 static int virtio_ccw_add_irqfd(VirtioCcwDevice
*dev
, int n
)
956 VirtIODevice
*vdev
= virtio_bus_get_device(&dev
->bus
);
957 VirtQueue
*vq
= virtio_get_queue(vdev
, n
);
958 EventNotifier
*notifier
= virtio_queue_get_guest_notifier(vq
);
960 return kvm_irqchip_add_irqfd_notifier_gsi(kvm_state
, notifier
, NULL
,
964 static void virtio_ccw_remove_irqfd(VirtioCcwDevice
*dev
, int n
)
966 VirtIODevice
*vdev
= virtio_bus_get_device(&dev
->bus
);
967 VirtQueue
*vq
= virtio_get_queue(vdev
, n
);
968 EventNotifier
*notifier
= virtio_queue_get_guest_notifier(vq
);
971 ret
= kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state
, notifier
,
976 static int virtio_ccw_set_guest_notifier(VirtioCcwDevice
*dev
, int n
,
977 bool assign
, bool with_irqfd
)
979 VirtIODevice
*vdev
= virtio_bus_get_device(&dev
->bus
);
980 VirtQueue
*vq
= virtio_get_queue(vdev
, n
);
981 EventNotifier
*notifier
= virtio_queue_get_guest_notifier(vq
);
982 VirtioDeviceClass
*k
= VIRTIO_DEVICE_GET_CLASS(vdev
);
985 int r
= event_notifier_init(notifier
, 0);
990 virtio_queue_set_guest_notifier_fd_handler(vq
, true, with_irqfd
);
992 r
= virtio_ccw_add_irqfd(dev
, n
);
994 virtio_queue_set_guest_notifier_fd_handler(vq
, false,
1000 * We do not support individual masking for channel devices, so we
1001 * need to manually trigger any guest masking callbacks here.
1003 if (k
->guest_notifier_mask
&& vdev
->use_guest_notifier_mask
) {
1004 k
->guest_notifier_mask(vdev
, n
, false);
1006 /* get lost events and re-inject */
1007 if (k
->guest_notifier_pending
&&
1008 k
->guest_notifier_pending(vdev
, n
)) {
1009 event_notifier_set(notifier
);
1012 if (k
->guest_notifier_mask
&& vdev
->use_guest_notifier_mask
) {
1013 k
->guest_notifier_mask(vdev
, n
, true);
1016 virtio_ccw_remove_irqfd(dev
, n
);
1018 virtio_queue_set_guest_notifier_fd_handler(vq
, false, with_irqfd
);
1019 event_notifier_cleanup(notifier
);
1024 static int virtio_ccw_set_guest_notifiers(DeviceState
*d
, int nvqs
,
1027 VirtioCcwDevice
*dev
= VIRTIO_CCW_DEVICE(d
);
1028 VirtIODevice
*vdev
= virtio_bus_get_device(&dev
->bus
);
1029 CcwDevice
*ccw_dev
= CCW_DEVICE(d
);
1030 bool with_irqfd
= ccw_dev
->sch
->thinint_active
&& kvm_irqfds_enabled();
1033 if (with_irqfd
&& assigned
) {
1034 /* irq routes need to be set up before assigning irqfds */
1035 r
= virtio_ccw_setup_irqroutes(dev
, nvqs
);
1037 goto irqroute_error
;
1040 for (n
= 0; n
< nvqs
; n
++) {
1041 if (!virtio_queue_get_num(vdev
, n
)) {
1044 r
= virtio_ccw_set_guest_notifier(dev
, n
, assigned
, with_irqfd
);
1049 if (with_irqfd
&& !assigned
) {
1050 /* release irq routes after irqfds have been released */
1051 virtio_ccw_release_irqroutes(dev
, nvqs
);
1057 virtio_ccw_set_guest_notifier(dev
, n
, !assigned
, false);
1060 if (with_irqfd
&& assigned
) {
1061 virtio_ccw_release_irqroutes(dev
, nvqs
);
1066 static void virtio_ccw_save_queue(DeviceState
*d
, int n
, QEMUFile
*f
)
1068 VirtioCcwDevice
*dev
= VIRTIO_CCW_DEVICE(d
);
1069 VirtIODevice
*vdev
= virtio_bus_get_device(&dev
->bus
);
1071 qemu_put_be16(f
, virtio_queue_vector(vdev
, n
));
1074 static int virtio_ccw_load_queue(DeviceState
*d
, int n
, QEMUFile
*f
)
1076 VirtioCcwDevice
*dev
= VIRTIO_CCW_DEVICE(d
);
1077 VirtIODevice
*vdev
= virtio_bus_get_device(&dev
->bus
);
1080 qemu_get_be16s(f
, &vector
);
1081 virtio_queue_set_vector(vdev
, n
, vector
);
1086 static void virtio_ccw_save_config(DeviceState
*d
, QEMUFile
*f
)
1088 VirtioCcwDevice
*dev
= VIRTIO_CCW_DEVICE(d
);
1089 vmstate_save_state(f
, &vmstate_virtio_ccw_dev
, dev
, NULL
);
1092 static int virtio_ccw_load_config(DeviceState
*d
, QEMUFile
*f
)
1094 VirtioCcwDevice
*dev
= VIRTIO_CCW_DEVICE(d
);
1095 return vmstate_load_state(f
, &vmstate_virtio_ccw_dev
, dev
, 1);
1098 static void virtio_ccw_pre_plugged(DeviceState
*d
, Error
**errp
)
1100 VirtioCcwDevice
*dev
= VIRTIO_CCW_DEVICE(d
);
1101 VirtIODevice
*vdev
= virtio_bus_get_device(&dev
->bus
);
1103 if (dev
->max_rev
>= 1) {
1104 virtio_add_feature(&vdev
->host_features
, VIRTIO_F_VERSION_1
);
1108 /* This is called by virtio-bus just after the device is plugged. */
1109 static void virtio_ccw_device_plugged(DeviceState
*d
, Error
**errp
)
1111 VirtioCcwDevice
*dev
= VIRTIO_CCW_DEVICE(d
);
1112 VirtIODevice
*vdev
= virtio_bus_get_device(&dev
->bus
);
1113 CcwDevice
*ccw_dev
= CCW_DEVICE(d
);
1114 SubchDev
*sch
= ccw_dev
->sch
;
1115 int n
= virtio_get_num_queues(vdev
);
1116 S390FLICState
*flic
= s390_get_flic();
1118 if (!virtio_has_feature(vdev
->host_features
, VIRTIO_F_VERSION_1
)) {
1122 if (virtio_get_num_queues(vdev
) > VIRTIO_QUEUE_MAX
) {
1123 error_setg(errp
, "The number of virtqueues %d "
1124 "exceeds virtio limit %d", n
,
1128 if (virtio_get_num_queues(vdev
) > flic
->adapter_routes_max_batch
) {
1129 error_setg(errp
, "The number of virtqueues %d "
1130 "exceeds flic adapter route limit %d", n
,
1131 flic
->adapter_routes_max_batch
);
1135 sch
->id
.cu_model
= virtio_bus_get_vdev_id(&dev
->bus
);
1138 css_generate_sch_crws(sch
->cssid
, sch
->ssid
, sch
->schid
,
1142 static void virtio_ccw_device_unplugged(DeviceState
*d
)
1144 VirtioCcwDevice
*dev
= VIRTIO_CCW_DEVICE(d
);
1146 virtio_ccw_stop_ioeventfd(dev
);
1148 /**************** Virtio-ccw Bus Device Descriptions *******************/
1150 static void virtio_ccw_busdev_realize(DeviceState
*dev
, Error
**errp
)
1152 VirtioCcwDevice
*_dev
= (VirtioCcwDevice
*)dev
;
1154 virtio_ccw_bus_new(&_dev
->bus
, sizeof(_dev
->bus
), _dev
);
1155 virtio_ccw_device_realize(_dev
, errp
);
1158 static void virtio_ccw_busdev_unrealize(DeviceState
*dev
, Error
**errp
)
1160 VirtioCcwDevice
*_dev
= (VirtioCcwDevice
*)dev
;
1162 virtio_ccw_device_unrealize(_dev
, errp
);
1165 static void virtio_ccw_busdev_unplug(HotplugHandler
*hotplug_dev
,
1166 DeviceState
*dev
, Error
**errp
)
1168 VirtioCcwDevice
*_dev
= to_virtio_ccw_dev_fast(dev
);
1170 virtio_ccw_stop_ioeventfd(_dev
);
1173 static void virtio_ccw_device_class_init(ObjectClass
*klass
, void *data
)
1175 DeviceClass
*dc
= DEVICE_CLASS(klass
);
1176 CCWDeviceClass
*k
= CCW_DEVICE_CLASS(dc
);
1177 VirtIOCCWDeviceClass
*vdc
= VIRTIO_CCW_DEVICE_CLASS(klass
);
1179 k
->unplug
= virtio_ccw_busdev_unplug
;
1180 dc
->realize
= virtio_ccw_busdev_realize
;
1181 dc
->unrealize
= virtio_ccw_busdev_unrealize
;
1182 dc
->bus_type
= TYPE_VIRTUAL_CSS_BUS
;
1183 device_class_set_parent_reset(dc
, virtio_ccw_reset
, &vdc
->parent_reset
);
1186 static const TypeInfo virtio_ccw_device_info
= {
1187 .name
= TYPE_VIRTIO_CCW_DEVICE
,
1188 .parent
= TYPE_CCW_DEVICE
,
1189 .instance_size
= sizeof(VirtioCcwDevice
),
1190 .class_init
= virtio_ccw_device_class_init
,
1191 .class_size
= sizeof(VirtIOCCWDeviceClass
),
1195 /* virtio-ccw-bus */
1197 static void virtio_ccw_bus_new(VirtioBusState
*bus
, size_t bus_size
,
1198 VirtioCcwDevice
*dev
)
1200 DeviceState
*qdev
= DEVICE(dev
);
1201 char virtio_bus_name
[] = "virtio-bus";
1203 qbus_create_inplace(bus
, bus_size
, TYPE_VIRTIO_CCW_BUS
,
1204 qdev
, virtio_bus_name
);
1207 static void virtio_ccw_bus_class_init(ObjectClass
*klass
, void *data
)
1209 VirtioBusClass
*k
= VIRTIO_BUS_CLASS(klass
);
1210 BusClass
*bus_class
= BUS_CLASS(klass
);
1212 bus_class
->max_dev
= 1;
1213 k
->notify
= virtio_ccw_notify
;
1214 k
->vmstate_change
= virtio_ccw_vmstate_change
;
1215 k
->query_guest_notifiers
= virtio_ccw_query_guest_notifiers
;
1216 k
->set_guest_notifiers
= virtio_ccw_set_guest_notifiers
;
1217 k
->save_queue
= virtio_ccw_save_queue
;
1218 k
->load_queue
= virtio_ccw_load_queue
;
1219 k
->save_config
= virtio_ccw_save_config
;
1220 k
->load_config
= virtio_ccw_load_config
;
1221 k
->pre_plugged
= virtio_ccw_pre_plugged
;
1222 k
->device_plugged
= virtio_ccw_device_plugged
;
1223 k
->device_unplugged
= virtio_ccw_device_unplugged
;
1224 k
->ioeventfd_enabled
= virtio_ccw_ioeventfd_enabled
;
1225 k
->ioeventfd_assign
= virtio_ccw_ioeventfd_assign
;
1228 static const TypeInfo virtio_ccw_bus_info
= {
1229 .name
= TYPE_VIRTIO_CCW_BUS
,
1230 .parent
= TYPE_VIRTIO_BUS
,
1231 .instance_size
= sizeof(VirtioCcwBusState
),
1232 .class_init
= virtio_ccw_bus_class_init
,
1235 static void virtio_ccw_register(void)
1237 type_register_static(&virtio_ccw_bus_info
);
1238 type_register_static(&virtio_ccw_device_info
);
1241 type_init(virtio_ccw_register
)