/*
 * virtio ccw target implementation
 *
 * Copyright 2012,2015 IBM Corp.
 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
 *            Pierre Morel <pmorel@linux.vnet.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or (at
 * your option) any later version. See the COPYING file in the top-level
 * directory.
 */
13 #include "qemu/osdep.h"
14 #include "qapi/error.h"
16 #include "sysemu/sysemu.h"
17 #include "sysemu/kvm.h"
19 #include "hw/virtio/virtio.h"
20 #include "hw/virtio/virtio-net.h"
21 #include "hw/sysbus.h"
22 #include "qemu/bitops.h"
23 #include "qemu/error-report.h"
24 #include "qemu/module.h"
25 #include "hw/virtio/virtio-access.h"
26 #include "hw/virtio/virtio-bus.h"
27 #include "hw/s390x/adapter.h"
28 #include "hw/s390x/s390_flic.h"
30 #include "hw/s390x/ioinst.h"
31 #include "hw/s390x/css.h"
32 #include "virtio-ccw.h"
34 #include "hw/s390x/css-bridge.h"
35 #include "hw/s390x/s390-virtio-ccw.h"
37 #define NR_CLASSIC_INDICATOR_BITS 64
39 static int virtio_ccw_dev_post_load(void *opaque
, int version_id
)
41 VirtioCcwDevice
*dev
= VIRTIO_CCW_DEVICE(opaque
);
42 CcwDevice
*ccw_dev
= CCW_DEVICE(dev
);
43 CCWDeviceClass
*ck
= CCW_DEVICE_GET_CLASS(ccw_dev
);
45 ccw_dev
->sch
->driver_data
= dev
;
46 if (ccw_dev
->sch
->thinint_active
) {
47 dev
->routes
.adapter
.adapter_id
= css_get_adapter_id(
48 CSS_IO_ADAPTER_VIRTIO
,
51 /* Re-fill subch_id after loading the subchannel states.*/
53 ck
->refill_ids(ccw_dev
);
58 typedef struct VirtioCcwDeviceTmp
{
59 VirtioCcwDevice
*parent
;
60 uint16_t config_vector
;
63 static int virtio_ccw_dev_tmp_pre_save(void *opaque
)
65 VirtioCcwDeviceTmp
*tmp
= opaque
;
66 VirtioCcwDevice
*dev
= tmp
->parent
;
67 VirtIODevice
*vdev
= virtio_bus_get_device(&dev
->bus
);
69 tmp
->config_vector
= vdev
->config_vector
;
74 static int virtio_ccw_dev_tmp_post_load(void *opaque
, int version_id
)
76 VirtioCcwDeviceTmp
*tmp
= opaque
;
77 VirtioCcwDevice
*dev
= tmp
->parent
;
78 VirtIODevice
*vdev
= virtio_bus_get_device(&dev
->bus
);
80 vdev
->config_vector
= tmp
->config_vector
;
84 const VMStateDescription vmstate_virtio_ccw_dev_tmp
= {
85 .name
= "s390_virtio_ccw_dev_tmp",
86 .pre_save
= virtio_ccw_dev_tmp_pre_save
,
87 .post_load
= virtio_ccw_dev_tmp_post_load
,
88 .fields
= (VMStateField
[]) {
89 VMSTATE_UINT16(config_vector
, VirtioCcwDeviceTmp
),
94 const VMStateDescription vmstate_virtio_ccw_dev
= {
95 .name
= "s390_virtio_ccw_dev",
97 .minimum_version_id
= 1,
98 .post_load
= virtio_ccw_dev_post_load
,
99 .fields
= (VMStateField
[]) {
100 VMSTATE_CCW_DEVICE(parent_obj
, VirtioCcwDevice
),
101 VMSTATE_PTR_TO_IND_ADDR(indicators
, VirtioCcwDevice
),
102 VMSTATE_PTR_TO_IND_ADDR(indicators2
, VirtioCcwDevice
),
103 VMSTATE_PTR_TO_IND_ADDR(summary_indicator
, VirtioCcwDevice
),
105 * Ugly hack because VirtIODevice does not migrate itself.
106 * This also makes legacy via vmstate_save_state possible.
108 VMSTATE_WITH_TMP(VirtioCcwDevice
, VirtioCcwDeviceTmp
,
109 vmstate_virtio_ccw_dev_tmp
),
110 VMSTATE_STRUCT(routes
, VirtioCcwDevice
, 1, vmstate_adapter_routes
,
112 VMSTATE_UINT8(thinint_isc
, VirtioCcwDevice
),
113 VMSTATE_INT32(revision
, VirtioCcwDevice
),
114 VMSTATE_END_OF_LIST()
118 static void virtio_ccw_bus_new(VirtioBusState
*bus
, size_t bus_size
,
119 VirtioCcwDevice
*dev
);
121 VirtIODevice
*virtio_ccw_get_vdev(SubchDev
*sch
)
123 VirtIODevice
*vdev
= NULL
;
124 VirtioCcwDevice
*dev
= sch
->driver_data
;
127 vdev
= virtio_bus_get_device(&dev
->bus
);
132 static void virtio_ccw_start_ioeventfd(VirtioCcwDevice
*dev
)
134 virtio_bus_start_ioeventfd(&dev
->bus
);
137 static void virtio_ccw_stop_ioeventfd(VirtioCcwDevice
*dev
)
139 virtio_bus_stop_ioeventfd(&dev
->bus
);
142 static bool virtio_ccw_ioeventfd_enabled(DeviceState
*d
)
144 VirtioCcwDevice
*dev
= VIRTIO_CCW_DEVICE(d
);
146 return (dev
->flags
& VIRTIO_CCW_FLAG_USE_IOEVENTFD
) != 0;
149 static int virtio_ccw_ioeventfd_assign(DeviceState
*d
, EventNotifier
*notifier
,
152 VirtioCcwDevice
*dev
= VIRTIO_CCW_DEVICE(d
);
153 CcwDevice
*ccw_dev
= CCW_DEVICE(dev
);
154 SubchDev
*sch
= ccw_dev
->sch
;
155 uint32_t sch_id
= (css_build_subchannel_id(sch
) << 16) | sch
->schid
;
157 return s390_assign_subch_ioeventfd(notifier
, sch_id
, n
, assign
);
160 /* Communication blocks used by several channel commands. */
161 typedef struct VqInfoBlockLegacy
{
166 } QEMU_PACKED VqInfoBlockLegacy
;
168 typedef struct VqInfoBlock
{
175 } QEMU_PACKED VqInfoBlock
;
177 typedef struct VqConfigBlock
{
180 } QEMU_PACKED VqConfigBlock
;
182 typedef struct VirtioFeatDesc
{
185 } QEMU_PACKED VirtioFeatDesc
;
187 typedef struct VirtioThinintInfo
{
188 hwaddr summary_indicator
;
189 hwaddr device_indicator
;
192 } QEMU_PACKED VirtioThinintInfo
;
194 typedef struct VirtioRevInfo
{
198 } QEMU_PACKED VirtioRevInfo
;
200 /* Specify where the virtqueues for the subchannel are in guest memory. */
201 static int virtio_ccw_set_vqs(SubchDev
*sch
, VqInfoBlock
*info
,
202 VqInfoBlockLegacy
*linfo
)
204 VirtIODevice
*vdev
= virtio_ccw_get_vdev(sch
);
205 uint16_t index
= info
? info
->index
: linfo
->index
;
206 uint16_t num
= info
? info
->num
: linfo
->num
;
207 uint64_t desc
= info
? info
->desc
: linfo
->queue
;
209 if (index
>= VIRTIO_QUEUE_MAX
) {
213 /* Current code in virtio.c relies on 4K alignment. */
214 if (linfo
&& desc
&& (linfo
->align
!= 4096)) {
223 virtio_queue_set_rings(vdev
, index
, desc
, info
->avail
, info
->used
);
225 virtio_queue_set_addr(vdev
, index
, desc
);
228 virtio_queue_set_vector(vdev
, index
, VIRTIO_NO_VECTOR
);
231 /* virtio-1 allows changing the ring size. */
232 if (virtio_queue_get_max_num(vdev
, index
) < num
) {
233 /* Fail if we exceed the maximum number. */
236 virtio_queue_set_num(vdev
, index
, num
);
237 } else if (virtio_queue_get_num(vdev
, index
) > num
) {
238 /* Fail if we don't have a big enough queue. */
241 /* We ignore possible increased num for legacy for compatibility. */
242 virtio_queue_set_vector(vdev
, index
, index
);
244 /* tell notify handler in case of config change */
245 vdev
->config_vector
= VIRTIO_QUEUE_MAX
;
249 static void virtio_ccw_reset_virtio(VirtioCcwDevice
*dev
, VirtIODevice
*vdev
)
251 CcwDevice
*ccw_dev
= CCW_DEVICE(dev
);
253 virtio_ccw_stop_ioeventfd(dev
);
255 if (dev
->indicators
) {
256 release_indicator(&dev
->routes
.adapter
, dev
->indicators
);
257 dev
->indicators
= NULL
;
259 if (dev
->indicators2
) {
260 release_indicator(&dev
->routes
.adapter
, dev
->indicators2
);
261 dev
->indicators2
= NULL
;
263 if (dev
->summary_indicator
) {
264 release_indicator(&dev
->routes
.adapter
, dev
->summary_indicator
);
265 dev
->summary_indicator
= NULL
;
267 ccw_dev
->sch
->thinint_active
= false;
270 static int virtio_ccw_handle_set_vq(SubchDev
*sch
, CCW1 ccw
, bool check_len
,
275 VqInfoBlockLegacy linfo
;
276 size_t info_len
= is_legacy
? sizeof(linfo
) : sizeof(info
);
279 if (ccw
.count
!= info_len
) {
282 } else if (ccw
.count
< info_len
) {
283 /* Can't execute command. */
290 ccw_dstream_read(&sch
->cds
, linfo
);
291 linfo
.queue
= be64_to_cpu(linfo
.queue
);
292 linfo
.align
= be32_to_cpu(linfo
.align
);
293 linfo
.index
= be16_to_cpu(linfo
.index
);
294 linfo
.num
= be16_to_cpu(linfo
.num
);
295 ret
= virtio_ccw_set_vqs(sch
, NULL
, &linfo
);
297 ccw_dstream_read(&sch
->cds
, info
);
298 info
.desc
= be64_to_cpu(info
.desc
);
299 info
.index
= be16_to_cpu(info
.index
);
300 info
.num
= be16_to_cpu(info
.num
);
301 info
.avail
= be64_to_cpu(info
.avail
);
302 info
.used
= be64_to_cpu(info
.used
);
303 ret
= virtio_ccw_set_vqs(sch
, &info
, NULL
);
305 sch
->curr_status
.scsw
.count
= 0;
309 static int virtio_ccw_cb(SubchDev
*sch
, CCW1 ccw
)
312 VirtioRevInfo revinfo
;
314 VirtioFeatDesc features
;
316 VqConfigBlock vq_config
;
317 VirtioCcwDevice
*dev
= sch
->driver_data
;
318 VirtIODevice
*vdev
= virtio_ccw_get_vdev(sch
);
321 VirtioThinintInfo thinint
;
327 trace_virtio_ccw_interpret_ccw(sch
->cssid
, sch
->ssid
, sch
->schid
,
329 check_len
= !((ccw
.flags
& CCW_FLAG_SLI
) && !(ccw
.flags
& CCW_FLAG_DC
));
331 if (dev
->force_revision_1
&& dev
->revision
< 0 &&
332 ccw
.cmd_code
!= CCW_CMD_SET_VIRTIO_REV
) {
334 * virtio-1 drivers must start with negotiating to a revision >= 1,
335 * so post a command reject for all other commands
340 /* Look at the command. */
341 switch (ccw
.cmd_code
) {
343 ret
= virtio_ccw_handle_set_vq(sch
, ccw
, check_len
, dev
->revision
< 1);
345 case CCW_CMD_VDEV_RESET
:
346 virtio_ccw_reset_virtio(dev
, vdev
);
349 case CCW_CMD_READ_FEAT
:
351 if (ccw
.count
!= sizeof(features
)) {
355 } else if (ccw
.count
< sizeof(features
)) {
356 /* Can't execute command. */
363 VirtioDeviceClass
*vdc
= VIRTIO_DEVICE_GET_CLASS(vdev
);
365 ccw_dstream_advance(&sch
->cds
, sizeof(features
.features
));
366 ccw_dstream_read(&sch
->cds
, features
.index
);
367 if (features
.index
== 0) {
368 if (dev
->revision
>= 1) {
369 /* Don't offer legacy features for modern devices. */
370 features
.features
= (uint32_t)
371 (vdev
->host_features
& ~vdc
->legacy_features
);
373 features
.features
= (uint32_t)vdev
->host_features
;
375 } else if ((features
.index
== 1) && (dev
->revision
>= 1)) {
377 * Only offer feature bits beyond 31 if the guest has
378 * negotiated at least revision 1.
380 features
.features
= (uint32_t)(vdev
->host_features
>> 32);
382 /* Return zeroes if the guest supports more feature bits. */
383 features
.features
= 0;
385 ccw_dstream_rewind(&sch
->cds
);
386 features
.features
= cpu_to_le32(features
.features
);
387 ccw_dstream_write(&sch
->cds
, features
.features
);
388 sch
->curr_status
.scsw
.count
= ccw
.count
- sizeof(features
);
392 case CCW_CMD_WRITE_FEAT
:
394 if (ccw
.count
!= sizeof(features
)) {
398 } else if (ccw
.count
< sizeof(features
)) {
399 /* Can't execute command. */
406 ccw_dstream_read(&sch
->cds
, features
);
407 features
.features
= le32_to_cpu(features
.features
);
408 if (features
.index
== 0) {
409 virtio_set_features(vdev
,
410 (vdev
->guest_features
& 0xffffffff00000000ULL
) |
412 } else if ((features
.index
== 1) && (dev
->revision
>= 1)) {
414 * If the guest did not negotiate at least revision 1,
415 * we did not offer it any feature bits beyond 31. Such a
416 * guest passing us any bit here is therefore buggy.
418 virtio_set_features(vdev
,
419 (vdev
->guest_features
& 0x00000000ffffffffULL
) |
420 ((uint64_t)features
.features
<< 32));
423 * If the guest supports more feature bits, assert that it
424 * passes us zeroes for those we don't support.
426 if (features
.features
) {
427 qemu_log_mask(LOG_GUEST_ERROR
,
428 "Guest bug: features[%i]=%x (expected 0)",
429 features
.index
, features
.features
);
430 /* XXX: do a unit check here? */
433 sch
->curr_status
.scsw
.count
= ccw
.count
- sizeof(features
);
437 case CCW_CMD_READ_CONF
:
439 if (ccw
.count
> vdev
->config_len
) {
444 len
= MIN(ccw
.count
, vdev
->config_len
);
448 virtio_bus_get_vdev_config(&dev
->bus
, vdev
->config
);
449 ccw_dstream_write_buf(&sch
->cds
, vdev
->config
, len
);
450 sch
->curr_status
.scsw
.count
= ccw
.count
- len
;
454 case CCW_CMD_WRITE_CONF
:
456 if (ccw
.count
> vdev
->config_len
) {
461 len
= MIN(ccw
.count
, vdev
->config_len
);
465 ret
= ccw_dstream_read_buf(&sch
->cds
, vdev
->config
, len
);
467 virtio_bus_set_vdev_config(&dev
->bus
, vdev
->config
);
468 sch
->curr_status
.scsw
.count
= ccw
.count
- len
;
472 case CCW_CMD_READ_STATUS
:
474 if (ccw
.count
!= sizeof(status
)) {
478 } else if (ccw
.count
< sizeof(status
)) {
479 /* Can't execute command. */
486 address_space_stb(&address_space_memory
, ccw
.cda
, vdev
->status
,
487 MEMTXATTRS_UNSPECIFIED
, NULL
);
488 sch
->curr_status
.scsw
.count
= ccw
.count
- sizeof(vdev
->status
);
492 case CCW_CMD_WRITE_STATUS
:
494 if (ccw
.count
!= sizeof(status
)) {
498 } else if (ccw
.count
< sizeof(status
)) {
499 /* Can't execute command. */
506 ccw_dstream_read(&sch
->cds
, status
);
507 if (!(status
& VIRTIO_CONFIG_S_DRIVER_OK
)) {
508 virtio_ccw_stop_ioeventfd(dev
);
510 if (virtio_set_status(vdev
, status
) == 0) {
511 if (vdev
->status
== 0) {
512 virtio_ccw_reset_virtio(dev
, vdev
);
514 if (status
& VIRTIO_CONFIG_S_DRIVER_OK
) {
515 virtio_ccw_start_ioeventfd(dev
);
517 sch
->curr_status
.scsw
.count
= ccw
.count
- sizeof(status
);
520 /* Trigger a command reject. */
525 case CCW_CMD_SET_IND
:
527 if (ccw
.count
!= sizeof(indicators
)) {
531 } else if (ccw
.count
< sizeof(indicators
)) {
532 /* Can't execute command. */
536 if (sch
->thinint_active
) {
537 /* Trigger a command reject. */
541 if (virtio_get_num_queues(vdev
) > NR_CLASSIC_INDICATOR_BITS
) {
542 /* More queues than indicator bits --> trigger a reject */
549 ccw_dstream_read(&sch
->cds
, indicators
);
550 indicators
= be64_to_cpu(indicators
);
551 dev
->indicators
= get_indicator(indicators
, sizeof(uint64_t));
552 sch
->curr_status
.scsw
.count
= ccw
.count
- sizeof(indicators
);
556 case CCW_CMD_SET_CONF_IND
:
558 if (ccw
.count
!= sizeof(indicators
)) {
562 } else if (ccw
.count
< sizeof(indicators
)) {
563 /* Can't execute command. */
570 ccw_dstream_read(&sch
->cds
, indicators
);
571 indicators
= be64_to_cpu(indicators
);
572 dev
->indicators2
= get_indicator(indicators
, sizeof(uint64_t));
573 sch
->curr_status
.scsw
.count
= ccw
.count
- sizeof(indicators
);
577 case CCW_CMD_READ_VQ_CONF
:
579 if (ccw
.count
!= sizeof(vq_config
)) {
583 } else if (ccw
.count
< sizeof(vq_config
)) {
584 /* Can't execute command. */
591 ccw_dstream_read(&sch
->cds
, vq_config
.index
);
592 vq_config
.index
= be16_to_cpu(vq_config
.index
);
593 if (vq_config
.index
>= VIRTIO_QUEUE_MAX
) {
597 vq_config
.num_max
= virtio_queue_get_num(vdev
,
599 vq_config
.num_max
= cpu_to_be16(vq_config
.num_max
);
600 ccw_dstream_write(&sch
->cds
, vq_config
.num_max
);
601 sch
->curr_status
.scsw
.count
= ccw
.count
- sizeof(vq_config
);
605 case CCW_CMD_SET_IND_ADAPTER
:
607 if (ccw
.count
!= sizeof(thinint
)) {
611 } else if (ccw
.count
< sizeof(thinint
)) {
612 /* Can't execute command. */
618 } else if (dev
->indicators
&& !sch
->thinint_active
) {
619 /* Trigger a command reject. */
622 if (ccw_dstream_read(&sch
->cds
, thinint
)) {
625 thinint
.ind_bit
= be64_to_cpu(thinint
.ind_bit
);
626 thinint
.summary_indicator
=
627 be64_to_cpu(thinint
.summary_indicator
);
628 thinint
.device_indicator
=
629 be64_to_cpu(thinint
.device_indicator
);
631 dev
->summary_indicator
=
632 get_indicator(thinint
.summary_indicator
, sizeof(uint8_t));
634 get_indicator(thinint
.device_indicator
,
635 thinint
.ind_bit
/ 8 + 1);
636 dev
->thinint_isc
= thinint
.isc
;
637 dev
->routes
.adapter
.ind_offset
= thinint
.ind_bit
;
638 dev
->routes
.adapter
.summary_offset
= 7;
639 dev
->routes
.adapter
.adapter_id
= css_get_adapter_id(
640 CSS_IO_ADAPTER_VIRTIO
,
642 sch
->thinint_active
= ((dev
->indicators
!= NULL
) &&
643 (dev
->summary_indicator
!= NULL
));
644 sch
->curr_status
.scsw
.count
= ccw
.count
- sizeof(thinint
);
649 case CCW_CMD_SET_VIRTIO_REV
:
650 len
= sizeof(revinfo
);
651 if (ccw
.count
< len
) {
659 ccw_dstream_read_buf(&sch
->cds
, &revinfo
, 4);
660 revinfo
.revision
= be16_to_cpu(revinfo
.revision
);
661 revinfo
.length
= be16_to_cpu(revinfo
.length
);
662 if (ccw
.count
< len
+ revinfo
.length
||
663 (check_len
&& ccw
.count
> len
+ revinfo
.length
)) {
668 * Once we start to support revisions with additional data, we'll
669 * need to fetch it here. Nothing to do for now, though.
671 if (dev
->revision
>= 0 ||
672 revinfo
.revision
> virtio_ccw_rev_max(dev
) ||
673 (dev
->force_revision_1
&& !revinfo
.revision
)) {
678 dev
->revision
= revinfo
.revision
;
687 static void virtio_sch_disable_cb(SubchDev
*sch
)
689 VirtioCcwDevice
*dev
= sch
->driver_data
;
694 static void virtio_ccw_device_realize(VirtioCcwDevice
*dev
, Error
**errp
)
696 VirtIOCCWDeviceClass
*k
= VIRTIO_CCW_DEVICE_GET_CLASS(dev
);
697 CcwDevice
*ccw_dev
= CCW_DEVICE(dev
);
698 CCWDeviceClass
*ck
= CCW_DEVICE_GET_CLASS(ccw_dev
);
702 sch
= css_create_sch(ccw_dev
->devno
, errp
);
706 if (!virtio_ccw_rev_max(dev
) && dev
->force_revision_1
) {
707 error_setg(&err
, "Invalid value of property max_rev "
708 "(is %d expected >= 1)", virtio_ccw_rev_max(dev
));
712 sch
->driver_data
= dev
;
713 sch
->ccw_cb
= virtio_ccw_cb
;
714 sch
->disable_cb
= virtio_sch_disable_cb
;
715 sch
->id
.reserved
= 0xff;
716 sch
->id
.cu_type
= VIRTIO_CCW_CU_TYPE
;
717 sch
->do_subchannel_work
= do_subchannel_work_virtual
;
719 dev
->indicators
= NULL
;
721 css_sch_build_virtual_schib(sch
, 0, VIRTIO_CCW_CHPID_TYPE
);
723 trace_virtio_ccw_new_device(
724 sch
->cssid
, sch
->ssid
, sch
->schid
, sch
->devno
,
725 ccw_dev
->devno
.valid
? "user-configured" : "auto-configured");
727 if (kvm_enabled() && !kvm_eventfds_enabled()) {
728 dev
->flags
&= ~VIRTIO_CCW_FLAG_USE_IOEVENTFD
;
732 k
->realize(dev
, &err
);
738 ck
->realize(ccw_dev
, &err
);
746 error_propagate(errp
, err
);
747 css_subch_assign(sch
->cssid
, sch
->ssid
, sch
->schid
, sch
->devno
, NULL
);
752 static void virtio_ccw_device_unrealize(VirtioCcwDevice
*dev
, Error
**errp
)
754 VirtIOCCWDeviceClass
*dc
= VIRTIO_CCW_DEVICE_GET_CLASS(dev
);
755 CcwDevice
*ccw_dev
= CCW_DEVICE(dev
);
756 SubchDev
*sch
= ccw_dev
->sch
;
759 dc
->unrealize(dev
, errp
);
763 css_subch_assign(sch
->cssid
, sch
->ssid
, sch
->schid
, sch
->devno
, NULL
);
767 if (dev
->indicators
) {
768 release_indicator(&dev
->routes
.adapter
, dev
->indicators
);
769 dev
->indicators
= NULL
;
773 /* DeviceState to VirtioCcwDevice. Note: used on datapath,
774 * be careful and test performance if you change this.
776 static inline VirtioCcwDevice
*to_virtio_ccw_dev_fast(DeviceState
*d
)
778 CcwDevice
*ccw_dev
= to_ccw_dev_fast(d
);
780 return container_of(ccw_dev
, VirtioCcwDevice
, parent_obj
);
783 static uint8_t virtio_set_ind_atomic(SubchDev
*sch
, uint64_t ind_loc
,
786 uint8_t ind_old
, ind_new
;
790 ind_addr
= cpu_physical_memory_map(ind_loc
, &len
, 1);
792 error_report("%s(%x.%x.%04x): unable to access indicator",
793 __func__
, sch
->cssid
, sch
->ssid
, sch
->schid
);
798 ind_new
= ind_old
| to_be_set
;
799 } while (atomic_cmpxchg(ind_addr
, ind_old
, ind_new
) != ind_old
);
800 trace_virtio_ccw_set_ind(ind_loc
, ind_old
, ind_new
);
801 cpu_physical_memory_unmap(ind_addr
, len
, 1, len
);
806 static void virtio_ccw_notify(DeviceState
*d
, uint16_t vector
)
808 VirtioCcwDevice
*dev
= to_virtio_ccw_dev_fast(d
);
809 CcwDevice
*ccw_dev
= to_ccw_dev_fast(d
);
810 SubchDev
*sch
= ccw_dev
->sch
;
813 if (vector
== VIRTIO_NO_VECTOR
) {
817 * vector < VIRTIO_QUEUE_MAX: notification for a virtqueue
818 * vector == VIRTIO_QUEUE_MAX: configuration change notification
819 * bits beyond that are unused and should never be notified for
821 assert(vector
<= VIRTIO_QUEUE_MAX
);
823 if (vector
< VIRTIO_QUEUE_MAX
) {
824 if (!dev
->indicators
) {
827 if (sch
->thinint_active
) {
829 * In the adapter interrupt case, indicators points to a
830 * memory area that may be (way) larger than 64 bit and
831 * ind_bit indicates the start of the indicators in a big
834 uint64_t ind_bit
= dev
->routes
.adapter
.ind_offset
;
836 virtio_set_ind_atomic(sch
, dev
->indicators
->addr
+
837 (ind_bit
+ vector
) / 8,
838 0x80 >> ((ind_bit
+ vector
) % 8));
839 if (!virtio_set_ind_atomic(sch
, dev
->summary_indicator
->addr
,
841 css_adapter_interrupt(CSS_IO_ADAPTER_VIRTIO
, dev
->thinint_isc
);
844 assert(vector
< NR_CLASSIC_INDICATOR_BITS
);
845 indicators
= address_space_ldq(&address_space_memory
,
846 dev
->indicators
->addr
,
847 MEMTXATTRS_UNSPECIFIED
,
849 indicators
|= 1ULL << vector
;
850 address_space_stq(&address_space_memory
, dev
->indicators
->addr
,
851 indicators
, MEMTXATTRS_UNSPECIFIED
, NULL
);
852 css_conditional_io_interrupt(sch
);
855 if (!dev
->indicators2
) {
858 indicators
= address_space_ldq(&address_space_memory
,
859 dev
->indicators2
->addr
,
860 MEMTXATTRS_UNSPECIFIED
,
863 address_space_stq(&address_space_memory
, dev
->indicators2
->addr
,
864 indicators
, MEMTXATTRS_UNSPECIFIED
, NULL
);
865 css_conditional_io_interrupt(sch
);
869 static void virtio_ccw_reset(DeviceState
*d
)
871 VirtioCcwDevice
*dev
= VIRTIO_CCW_DEVICE(d
);
872 VirtIODevice
*vdev
= virtio_bus_get_device(&dev
->bus
);
873 VirtIOCCWDeviceClass
*vdc
= VIRTIO_CCW_DEVICE_GET_CLASS(dev
);
875 virtio_ccw_reset_virtio(dev
, vdev
);
876 if (vdc
->parent_reset
) {
877 vdc
->parent_reset(d
);
881 static void virtio_ccw_vmstate_change(DeviceState
*d
, bool running
)
883 VirtioCcwDevice
*dev
= VIRTIO_CCW_DEVICE(d
);
886 virtio_ccw_start_ioeventfd(dev
);
888 virtio_ccw_stop_ioeventfd(dev
);
892 static bool virtio_ccw_query_guest_notifiers(DeviceState
*d
)
894 CcwDevice
*dev
= CCW_DEVICE(d
);
896 return !!(dev
->sch
->curr_status
.pmcw
.flags
& PMCW_FLAGS_MASK_ENA
);
899 static int virtio_ccw_get_mappings(VirtioCcwDevice
*dev
)
902 CcwDevice
*ccw_dev
= CCW_DEVICE(dev
);
904 if (!ccw_dev
->sch
->thinint_active
) {
908 r
= map_indicator(&dev
->routes
.adapter
, dev
->summary_indicator
);
912 r
= map_indicator(&dev
->routes
.adapter
, dev
->indicators
);
916 dev
->routes
.adapter
.summary_addr
= dev
->summary_indicator
->map
;
917 dev
->routes
.adapter
.ind_addr
= dev
->indicators
->map
;
922 static int virtio_ccw_setup_irqroutes(VirtioCcwDevice
*dev
, int nvqs
)
925 VirtIODevice
*vdev
= virtio_bus_get_device(&dev
->bus
);
927 S390FLICState
*fs
= s390_get_flic();
928 S390FLICStateClass
*fsc
= s390_get_flic_class(fs
);
930 ret
= virtio_ccw_get_mappings(dev
);
934 for (i
= 0; i
< nvqs
; i
++) {
935 if (!virtio_queue_get_num(vdev
, i
)) {
939 dev
->routes
.num_routes
= i
;
940 return fsc
->add_adapter_routes(fs
, &dev
->routes
);
943 static void virtio_ccw_release_irqroutes(VirtioCcwDevice
*dev
, int nvqs
)
945 S390FLICState
*fs
= s390_get_flic();
946 S390FLICStateClass
*fsc
= s390_get_flic_class(fs
);
948 fsc
->release_adapter_routes(fs
, &dev
->routes
);
951 static int virtio_ccw_add_irqfd(VirtioCcwDevice
*dev
, int n
)
953 VirtIODevice
*vdev
= virtio_bus_get_device(&dev
->bus
);
954 VirtQueue
*vq
= virtio_get_queue(vdev
, n
);
955 EventNotifier
*notifier
= virtio_queue_get_guest_notifier(vq
);
957 return kvm_irqchip_add_irqfd_notifier_gsi(kvm_state
, notifier
, NULL
,
961 static void virtio_ccw_remove_irqfd(VirtioCcwDevice
*dev
, int n
)
963 VirtIODevice
*vdev
= virtio_bus_get_device(&dev
->bus
);
964 VirtQueue
*vq
= virtio_get_queue(vdev
, n
);
965 EventNotifier
*notifier
= virtio_queue_get_guest_notifier(vq
);
968 ret
= kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state
, notifier
,
973 static int virtio_ccw_set_guest_notifier(VirtioCcwDevice
*dev
, int n
,
974 bool assign
, bool with_irqfd
)
976 VirtIODevice
*vdev
= virtio_bus_get_device(&dev
->bus
);
977 VirtQueue
*vq
= virtio_get_queue(vdev
, n
);
978 EventNotifier
*notifier
= virtio_queue_get_guest_notifier(vq
);
979 VirtioDeviceClass
*k
= VIRTIO_DEVICE_GET_CLASS(vdev
);
982 int r
= event_notifier_init(notifier
, 0);
987 virtio_queue_set_guest_notifier_fd_handler(vq
, true, with_irqfd
);
989 r
= virtio_ccw_add_irqfd(dev
, n
);
991 virtio_queue_set_guest_notifier_fd_handler(vq
, false,
997 * We do not support individual masking for channel devices, so we
998 * need to manually trigger any guest masking callbacks here.
1000 if (k
->guest_notifier_mask
&& vdev
->use_guest_notifier_mask
) {
1001 k
->guest_notifier_mask(vdev
, n
, false);
1003 /* get lost events and re-inject */
1004 if (k
->guest_notifier_pending
&&
1005 k
->guest_notifier_pending(vdev
, n
)) {
1006 event_notifier_set(notifier
);
1009 if (k
->guest_notifier_mask
&& vdev
->use_guest_notifier_mask
) {
1010 k
->guest_notifier_mask(vdev
, n
, true);
1013 virtio_ccw_remove_irqfd(dev
, n
);
1015 virtio_queue_set_guest_notifier_fd_handler(vq
, false, with_irqfd
);
1016 event_notifier_cleanup(notifier
);
1021 static int virtio_ccw_set_guest_notifiers(DeviceState
*d
, int nvqs
,
1024 VirtioCcwDevice
*dev
= VIRTIO_CCW_DEVICE(d
);
1025 VirtIODevice
*vdev
= virtio_bus_get_device(&dev
->bus
);
1026 CcwDevice
*ccw_dev
= CCW_DEVICE(d
);
1027 bool with_irqfd
= ccw_dev
->sch
->thinint_active
&& kvm_irqfds_enabled();
1030 if (with_irqfd
&& assigned
) {
1031 /* irq routes need to be set up before assigning irqfds */
1032 r
= virtio_ccw_setup_irqroutes(dev
, nvqs
);
1034 goto irqroute_error
;
1037 for (n
= 0; n
< nvqs
; n
++) {
1038 if (!virtio_queue_get_num(vdev
, n
)) {
1041 r
= virtio_ccw_set_guest_notifier(dev
, n
, assigned
, with_irqfd
);
1046 if (with_irqfd
&& !assigned
) {
1047 /* release irq routes after irqfds have been released */
1048 virtio_ccw_release_irqroutes(dev
, nvqs
);
1054 virtio_ccw_set_guest_notifier(dev
, n
, !assigned
, false);
1057 if (with_irqfd
&& assigned
) {
1058 virtio_ccw_release_irqroutes(dev
, nvqs
);
1063 static void virtio_ccw_save_queue(DeviceState
*d
, int n
, QEMUFile
*f
)
1065 VirtioCcwDevice
*dev
= VIRTIO_CCW_DEVICE(d
);
1066 VirtIODevice
*vdev
= virtio_bus_get_device(&dev
->bus
);
1068 qemu_put_be16(f
, virtio_queue_vector(vdev
, n
));
1071 static int virtio_ccw_load_queue(DeviceState
*d
, int n
, QEMUFile
*f
)
1073 VirtioCcwDevice
*dev
= VIRTIO_CCW_DEVICE(d
);
1074 VirtIODevice
*vdev
= virtio_bus_get_device(&dev
->bus
);
1077 qemu_get_be16s(f
, &vector
);
1078 virtio_queue_set_vector(vdev
, n
, vector
);
1083 static void virtio_ccw_save_config(DeviceState
*d
, QEMUFile
*f
)
1085 VirtioCcwDevice
*dev
= VIRTIO_CCW_DEVICE(d
);
1086 vmstate_save_state(f
, &vmstate_virtio_ccw_dev
, dev
, NULL
);
1089 static int virtio_ccw_load_config(DeviceState
*d
, QEMUFile
*f
)
1091 VirtioCcwDevice
*dev
= VIRTIO_CCW_DEVICE(d
);
1092 return vmstate_load_state(f
, &vmstate_virtio_ccw_dev
, dev
, 1);
1095 static void virtio_ccw_pre_plugged(DeviceState
*d
, Error
**errp
)
1097 VirtioCcwDevice
*dev
= VIRTIO_CCW_DEVICE(d
);
1098 VirtIODevice
*vdev
= virtio_bus_get_device(&dev
->bus
);
1100 if (dev
->max_rev
>= 1) {
1101 virtio_add_feature(&vdev
->host_features
, VIRTIO_F_VERSION_1
);
1105 /* This is called by virtio-bus just after the device is plugged. */
1106 static void virtio_ccw_device_plugged(DeviceState
*d
, Error
**errp
)
1108 VirtioCcwDevice
*dev
= VIRTIO_CCW_DEVICE(d
);
1109 VirtIODevice
*vdev
= virtio_bus_get_device(&dev
->bus
);
1110 CcwDevice
*ccw_dev
= CCW_DEVICE(d
);
1111 SubchDev
*sch
= ccw_dev
->sch
;
1112 int n
= virtio_get_num_queues(vdev
);
1113 S390FLICState
*flic
= s390_get_flic();
1115 if (!virtio_has_feature(vdev
->host_features
, VIRTIO_F_VERSION_1
)) {
1119 if (virtio_get_num_queues(vdev
) > VIRTIO_QUEUE_MAX
) {
1120 error_setg(errp
, "The number of virtqueues %d "
1121 "exceeds virtio limit %d", n
,
1125 if (virtio_get_num_queues(vdev
) > flic
->adapter_routes_max_batch
) {
1126 error_setg(errp
, "The number of virtqueues %d "
1127 "exceeds flic adapter route limit %d", n
,
1128 flic
->adapter_routes_max_batch
);
1132 sch
->id
.cu_model
= virtio_bus_get_vdev_id(&dev
->bus
);
1135 css_generate_sch_crws(sch
->cssid
, sch
->ssid
, sch
->schid
,
1139 static void virtio_ccw_device_unplugged(DeviceState
*d
)
1141 VirtioCcwDevice
*dev
= VIRTIO_CCW_DEVICE(d
);
1143 virtio_ccw_stop_ioeventfd(dev
);
1145 /**************** Virtio-ccw Bus Device Descriptions *******************/
1147 static void virtio_ccw_busdev_realize(DeviceState
*dev
, Error
**errp
)
1149 VirtioCcwDevice
*_dev
= (VirtioCcwDevice
*)dev
;
1151 virtio_ccw_bus_new(&_dev
->bus
, sizeof(_dev
->bus
), _dev
);
1152 virtio_ccw_device_realize(_dev
, errp
);
1155 static void virtio_ccw_busdev_unrealize(DeviceState
*dev
, Error
**errp
)
1157 VirtioCcwDevice
*_dev
= (VirtioCcwDevice
*)dev
;
1159 virtio_ccw_device_unrealize(_dev
, errp
);
1162 static void virtio_ccw_busdev_unplug(HotplugHandler
*hotplug_dev
,
1163 DeviceState
*dev
, Error
**errp
)
1165 VirtioCcwDevice
*_dev
= to_virtio_ccw_dev_fast(dev
);
1167 virtio_ccw_stop_ioeventfd(_dev
);
1170 static void virtio_ccw_device_class_init(ObjectClass
*klass
, void *data
)
1172 DeviceClass
*dc
= DEVICE_CLASS(klass
);
1173 CCWDeviceClass
*k
= CCW_DEVICE_CLASS(dc
);
1174 VirtIOCCWDeviceClass
*vdc
= VIRTIO_CCW_DEVICE_CLASS(klass
);
1176 k
->unplug
= virtio_ccw_busdev_unplug
;
1177 dc
->realize
= virtio_ccw_busdev_realize
;
1178 dc
->unrealize
= virtio_ccw_busdev_unrealize
;
1179 dc
->bus_type
= TYPE_VIRTUAL_CSS_BUS
;
1180 device_class_set_parent_reset(dc
, virtio_ccw_reset
, &vdc
->parent_reset
);
1183 static const TypeInfo virtio_ccw_device_info
= {
1184 .name
= TYPE_VIRTIO_CCW_DEVICE
,
1185 .parent
= TYPE_CCW_DEVICE
,
1186 .instance_size
= sizeof(VirtioCcwDevice
),
1187 .class_init
= virtio_ccw_device_class_init
,
1188 .class_size
= sizeof(VirtIOCCWDeviceClass
),
1192 /* virtio-ccw-bus */
1194 static void virtio_ccw_bus_new(VirtioBusState
*bus
, size_t bus_size
,
1195 VirtioCcwDevice
*dev
)
1197 DeviceState
*qdev
= DEVICE(dev
);
1198 char virtio_bus_name
[] = "virtio-bus";
1200 qbus_create_inplace(bus
, bus_size
, TYPE_VIRTIO_CCW_BUS
,
1201 qdev
, virtio_bus_name
);
1204 static void virtio_ccw_bus_class_init(ObjectClass
*klass
, void *data
)
1206 VirtioBusClass
*k
= VIRTIO_BUS_CLASS(klass
);
1207 BusClass
*bus_class
= BUS_CLASS(klass
);
1209 bus_class
->max_dev
= 1;
1210 k
->notify
= virtio_ccw_notify
;
1211 k
->vmstate_change
= virtio_ccw_vmstate_change
;
1212 k
->query_guest_notifiers
= virtio_ccw_query_guest_notifiers
;
1213 k
->set_guest_notifiers
= virtio_ccw_set_guest_notifiers
;
1214 k
->save_queue
= virtio_ccw_save_queue
;
1215 k
->load_queue
= virtio_ccw_load_queue
;
1216 k
->save_config
= virtio_ccw_save_config
;
1217 k
->load_config
= virtio_ccw_load_config
;
1218 k
->pre_plugged
= virtio_ccw_pre_plugged
;
1219 k
->device_plugged
= virtio_ccw_device_plugged
;
1220 k
->device_unplugged
= virtio_ccw_device_unplugged
;
1221 k
->ioeventfd_enabled
= virtio_ccw_ioeventfd_enabled
;
1222 k
->ioeventfd_assign
= virtio_ccw_ioeventfd_assign
;
1225 static const TypeInfo virtio_ccw_bus_info
= {
1226 .name
= TYPE_VIRTIO_CCW_BUS
,
1227 .parent
= TYPE_VIRTIO_BUS
,
1228 .instance_size
= sizeof(VirtioCcwBusState
),
1229 .class_init
= virtio_ccw_bus_class_init
,
1232 static void virtio_ccw_register(void)
1234 type_register_static(&virtio_ccw_bus_info
);
1235 type_register_static(&virtio_ccw_device_info
);
1238 type_init(virtio_ccw_register
)