/*
 * virtio ccw target implementation
 *
 * Copyright 2012,2015 IBM Corp.
 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
 *            Pierre Morel <pmorel@linux.vnet.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or (at
 * your option) any later version. See the COPYING file in the top-level
 * directory.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm.h"
#include "net/net.h"
#include "hw/virtio/virtio.h"
#include "migration/qemu-file-types.h"
#include "hw/virtio/virtio-net.h"
#include "hw/sysbus.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "hw/virtio/virtio-access.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/s390x/adapter.h"
#include "hw/s390x/s390_flic.h"

#include "hw/s390x/ioinst.h"
#include "hw/s390x/css.h"
#include "virtio-ccw.h"
#include "trace.h"
#include "hw/s390x/css-bridge.h"
#include "hw/s390x/s390-virtio-ccw.h"

#define NR_CLASSIC_INDICATOR_BITS 64

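/*
 * Restore the runtime links between the subchannel and the virtio proxy
 * device after migration; the subchannel state itself has already been
 * loaded at this point.
 */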
static int virtio_ccw_dev_post_load(void *opaque, int version_id)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(opaque);
    CcwDevice *ccw_dev = CCW_DEVICE(dev);
    CCWDeviceClass *ck = CCW_DEVICE_GET_CLASS(ccw_dev);

    ccw_dev->sch->driver_data = dev;
    if (ccw_dev->sch->thinint_active) {
        dev->routes.adapter.adapter_id = css_get_adapter_id(
                                         CSS_IO_ADAPTER_VIRTIO,
                                         dev->thinint_isc);
    }
    /* Re-fill subch_id after loading the subchannel states. */
    if (ck->refill_ids) {
        ck->refill_ids(ccw_dev);
    }
    return 0;
}

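/*
 * VirtioCcwDeviceTmp is only used as a migration helper: it lets the
 * config_vector stored in the VirtIODevice be copied in and out of the
 * VirtioCcwDevice vmstate via VMSTATE_WITH_TMP below.
 */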
typedef struct VirtioCcwDeviceTmp {
    VirtioCcwDevice *parent;
    uint16_t config_vector;
} VirtioCcwDeviceTmp;

static int virtio_ccw_dev_tmp_pre_save(void *opaque)
{
    VirtioCcwDeviceTmp *tmp = opaque;
    VirtioCcwDevice *dev = tmp->parent;
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);

    tmp->config_vector = vdev->config_vector;

    return 0;
}

static int virtio_ccw_dev_tmp_post_load(void *opaque, int version_id)
{
    VirtioCcwDeviceTmp *tmp = opaque;
    VirtioCcwDevice *dev = tmp->parent;
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);

    vdev->config_vector = tmp->config_vector;
    return 0;
}

const VMStateDescription vmstate_virtio_ccw_dev_tmp = {
    .name = "s390_virtio_ccw_dev_tmp",
    .pre_save = virtio_ccw_dev_tmp_pre_save,
    .post_load = virtio_ccw_dev_tmp_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT16(config_vector, VirtioCcwDeviceTmp),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_virtio_ccw_dev = {
    .name = "s390_virtio_ccw_dev",
    .version_id = 1,
    .minimum_version_id = 1,
    .post_load = virtio_ccw_dev_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_CCW_DEVICE(parent_obj, VirtioCcwDevice),
        VMSTATE_PTR_TO_IND_ADDR(indicators, VirtioCcwDevice),
        VMSTATE_PTR_TO_IND_ADDR(indicators2, VirtioCcwDevice),
        VMSTATE_PTR_TO_IND_ADDR(summary_indicator, VirtioCcwDevice),
        /*
         * Ugly hack because VirtIODevice does not migrate itself.
         * This also makes legacy via vmstate_save_state possible.
         */
        VMSTATE_WITH_TMP(VirtioCcwDevice, VirtioCcwDeviceTmp,
                         vmstate_virtio_ccw_dev_tmp),
        VMSTATE_STRUCT(routes, VirtioCcwDevice, 1, vmstate_adapter_routes,
                       AdapterRoutes),
        VMSTATE_UINT8(thinint_isc, VirtioCcwDevice),
        VMSTATE_INT32(revision, VirtioCcwDevice),
        VMSTATE_END_OF_LIST()
    }
};

static void virtio_ccw_bus_new(VirtioBusState *bus, size_t bus_size,
                               VirtioCcwDevice *dev);

VirtIODevice *virtio_ccw_get_vdev(SubchDev *sch)
{
    VirtIODevice *vdev = NULL;
    VirtioCcwDevice *dev = sch->driver_data;

    if (dev) {
        vdev = virtio_bus_get_device(&dev->bus);
    }
    return vdev;
}

static void virtio_ccw_start_ioeventfd(VirtioCcwDevice *dev)
{
    virtio_bus_start_ioeventfd(&dev->bus);
}

static void virtio_ccw_stop_ioeventfd(VirtioCcwDevice *dev)
{
    virtio_bus_stop_ioeventfd(&dev->bus);
}

static bool virtio_ccw_ioeventfd_enabled(DeviceState *d)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);

    return (dev->flags & VIRTIO_CCW_FLAG_USE_IOEVENTFD) != 0;
}

static int virtio_ccw_ioeventfd_assign(DeviceState *d, EventNotifier *notifier,
                                       int n, bool assign)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
    CcwDevice *ccw_dev = CCW_DEVICE(dev);
    SubchDev *sch = ccw_dev->sch;
    uint32_t sch_id = (css_build_subchannel_id(sch) << 16) | sch->schid;

    return s390_assign_subch_ioeventfd(notifier, sch_id, n, assign);
}

/* Communication blocks used by several channel commands. */
typedef struct VqInfoBlockLegacy {
    uint64_t queue;
    uint32_t align;
    uint16_t index;
    uint16_t num;
} QEMU_PACKED VqInfoBlockLegacy;

typedef struct VqInfoBlock {
    uint64_t desc;
    uint32_t res0;
    uint16_t index;
    uint16_t num;
    uint64_t avail;
    uint64_t used;
} QEMU_PACKED VqInfoBlock;

typedef struct VqConfigBlock {
    uint16_t index;
    uint16_t num_max;
} QEMU_PACKED VqConfigBlock;

typedef struct VirtioFeatDesc {
    uint32_t features;
    uint8_t index;
} QEMU_PACKED VirtioFeatDesc;

typedef struct VirtioThinintInfo {
    hwaddr summary_indicator;
    hwaddr device_indicator;
    uint64_t ind_bit;
    uint8_t isc;
} QEMU_PACKED VirtioThinintInfo;

typedef struct VirtioRevInfo {
    uint16_t revision;
    uint16_t length;
    uint8_t data[0];
} QEMU_PACKED VirtioRevInfo;

/* Specify where the virtqueues for the subchannel are in guest memory. */
static int virtio_ccw_set_vqs(SubchDev *sch, VqInfoBlock *info,
                              VqInfoBlockLegacy *linfo)
{
    VirtIODevice *vdev = virtio_ccw_get_vdev(sch);
    uint16_t index = info ? info->index : linfo->index;
    uint16_t num = info ? info->num : linfo->num;
    uint64_t desc = info ? info->desc : linfo->queue;

    if (index >= VIRTIO_QUEUE_MAX) {
        return -EINVAL;
    }

    /* Current code in virtio.c relies on 4K alignment. */
    if (linfo && desc && (linfo->align != 4096)) {
        return -EINVAL;
    }

    if (!vdev) {
        return -EINVAL;
    }

    if (info) {
        virtio_queue_set_rings(vdev, index, desc, info->avail, info->used);
    } else {
        virtio_queue_set_addr(vdev, index, desc);
    }
    if (!desc) {
        virtio_queue_set_vector(vdev, index, VIRTIO_NO_VECTOR);
    } else {
        if (info) {
            /* virtio-1 allows changing the ring size. */
            if (virtio_queue_get_max_num(vdev, index) < num) {
                /* Fail if we exceed the maximum number. */
                return -EINVAL;
            }
            virtio_queue_set_num(vdev, index, num);
        } else if (virtio_queue_get_num(vdev, index) > num) {
            /* Fail if we don't have a big enough queue. */
            return -EINVAL;
        }
        /* We ignore possible increased num for legacy for compatibility. */
        virtio_queue_set_vector(vdev, index, index);
    }
    /* tell notify handler in case of config change */
    vdev->config_vector = VIRTIO_QUEUE_MAX;
    return 0;
}

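/*
 * Reset the virtio device proper and drop any indicator and thin
 * interrupt state the guest has set up for this subchannel.
 */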
static void virtio_ccw_reset_virtio(VirtioCcwDevice *dev, VirtIODevice *vdev)
{
    CcwDevice *ccw_dev = CCW_DEVICE(dev);

    virtio_ccw_stop_ioeventfd(dev);
    virtio_reset(vdev);
    if (dev->indicators) {
        release_indicator(&dev->routes.adapter, dev->indicators);
        dev->indicators = NULL;
    }
    if (dev->indicators2) {
        release_indicator(&dev->routes.adapter, dev->indicators2);
        dev->indicators2 = NULL;
    }
    if (dev->summary_indicator) {
        release_indicator(&dev->routes.adapter, dev->summary_indicator);
        dev->summary_indicator = NULL;
    }
    ccw_dev->sch->thinint_active = false;
}

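/*
 * Handler for CCW_CMD_SET_VQ: read the (legacy or virtio-1) queue info
 * block from the channel program and pass it on to virtio_ccw_set_vqs().
 */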
static int virtio_ccw_handle_set_vq(SubchDev *sch, CCW1 ccw, bool check_len,
                                    bool is_legacy)
{
    int ret;
    VqInfoBlock info;
    VqInfoBlockLegacy linfo;
    size_t info_len = is_legacy ? sizeof(linfo) : sizeof(info);

    if (check_len) {
        if (ccw.count != info_len) {
            return -EINVAL;
        }
    } else if (ccw.count < info_len) {
        /* Can't execute command. */
        return -EINVAL;
    }
    if (!ccw.cda) {
        return -EFAULT;
    }
    if (is_legacy) {
        ccw_dstream_read(&sch->cds, linfo);
        linfo.queue = be64_to_cpu(linfo.queue);
        linfo.align = be32_to_cpu(linfo.align);
        linfo.index = be16_to_cpu(linfo.index);
        linfo.num = be16_to_cpu(linfo.num);
        ret = virtio_ccw_set_vqs(sch, NULL, &linfo);
    } else {
        ccw_dstream_read(&sch->cds, info);
        info.desc = be64_to_cpu(info.desc);
        info.index = be16_to_cpu(info.index);
        info.num = be16_to_cpu(info.num);
        info.avail = be64_to_cpu(info.avail);
        info.used = be64_to_cpu(info.used);
        ret = virtio_ccw_set_vqs(sch, &info, NULL);
    }
    sch->curr_status.scsw.count = 0;
    return ret;
}

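/*
 * Main CCW interpretation callback for virtio subchannels: decode the
 * command code of the current CCW and emulate the corresponding
 * virtio-ccw operation. A negative return value is turned into an error
 * status (such as a command reject) by the subchannel emulation.
 */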
static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
{
    int ret;
    VirtioRevInfo revinfo;
    uint8_t status;
    VirtioFeatDesc features;
    hwaddr indicators;
    VqConfigBlock vq_config;
    VirtioCcwDevice *dev = sch->driver_data;
    VirtIODevice *vdev = virtio_ccw_get_vdev(sch);
    bool check_len;
    int len;
    VirtioThinintInfo thinint;

    if (!dev) {
        return -EINVAL;
    }

    trace_virtio_ccw_interpret_ccw(sch->cssid, sch->ssid, sch->schid,
                                   ccw.cmd_code);
    check_len = !((ccw.flags & CCW_FLAG_SLI) && !(ccw.flags & CCW_FLAG_DC));

    if (dev->force_revision_1 && dev->revision < 0 &&
        ccw.cmd_code != CCW_CMD_SET_VIRTIO_REV) {
        /*
         * virtio-1 drivers must start with negotiating to a revision >= 1,
         * so post a command reject for all other commands
         */
        return -ENOSYS;
    }

    /* Look at the command. */
    switch (ccw.cmd_code) {
    case CCW_CMD_SET_VQ:
        ret = virtio_ccw_handle_set_vq(sch, ccw, check_len, dev->revision < 1);
        break;
    case CCW_CMD_VDEV_RESET:
        virtio_ccw_reset_virtio(dev, vdev);
        ret = 0;
        break;
    case CCW_CMD_READ_FEAT:
        if (check_len) {
            if (ccw.count != sizeof(features)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(features)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);

            ccw_dstream_advance(&sch->cds, sizeof(features.features));
            ccw_dstream_read(&sch->cds, features.index);
            if (features.index == 0) {
                if (dev->revision >= 1) {
                    /* Don't offer legacy features for modern devices. */
                    features.features = (uint32_t)
                        (vdev->host_features & ~vdc->legacy_features);
                } else {
                    features.features = (uint32_t)vdev->host_features;
                }
            } else if ((features.index == 1) && (dev->revision >= 1)) {
                /*
                 * Only offer feature bits beyond 31 if the guest has
                 * negotiated at least revision 1.
                 */
                features.features = (uint32_t)(vdev->host_features >> 32);
            } else {
                /* Return zeroes if the guest supports more feature bits. */
                features.features = 0;
            }
            ccw_dstream_rewind(&sch->cds);
            features.features = cpu_to_le32(features.features);
            ccw_dstream_write(&sch->cds, features.features);
            sch->curr_status.scsw.count = ccw.count - sizeof(features);
            ret = 0;
        }
        break;
    case CCW_CMD_WRITE_FEAT:
        if (check_len) {
            if (ccw.count != sizeof(features)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(features)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            ccw_dstream_read(&sch->cds, features);
            features.features = le32_to_cpu(features.features);
            if (features.index == 0) {
                virtio_set_features(vdev,
                                    (vdev->guest_features & 0xffffffff00000000ULL) |
                                    features.features);
            } else if ((features.index == 1) && (dev->revision >= 1)) {
                /*
                 * If the guest did not negotiate at least revision 1,
                 * we did not offer it any feature bits beyond 31. Such a
                 * guest passing us any bit here is therefore buggy.
                 */
                virtio_set_features(vdev,
                                    (vdev->guest_features & 0x00000000ffffffffULL) |
                                    ((uint64_t)features.features << 32));
            } else {
                /*
                 * If the guest supports more feature bits, assert that it
                 * passes us zeroes for those we don't support.
                 */
                if (features.features) {
                    qemu_log_mask(LOG_GUEST_ERROR,
                                  "Guest bug: features[%i]=%x (expected 0)",
                                  features.index, features.features);
                    /* XXX: do a unit check here? */
                }
            }
            sch->curr_status.scsw.count = ccw.count - sizeof(features);
            ret = 0;
        }
        break;
    case CCW_CMD_READ_CONF:
        if (check_len) {
            if (ccw.count > vdev->config_len) {
                ret = -EINVAL;
                break;
            }
        }
        len = MIN(ccw.count, vdev->config_len);
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            virtio_bus_get_vdev_config(&dev->bus, vdev->config);
            ccw_dstream_write_buf(&sch->cds, vdev->config, len);
            sch->curr_status.scsw.count = ccw.count - len;
            ret = 0;
        }
        break;
    case CCW_CMD_WRITE_CONF:
        if (check_len) {
            if (ccw.count > vdev->config_len) {
                ret = -EINVAL;
                break;
            }
        }
        len = MIN(ccw.count, vdev->config_len);
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            ret = ccw_dstream_read_buf(&sch->cds, vdev->config, len);
            if (!ret) {
                virtio_bus_set_vdev_config(&dev->bus, vdev->config);
                sch->curr_status.scsw.count = ccw.count - len;
            }
        }
        break;
    case CCW_CMD_READ_STATUS:
        if (check_len) {
            if (ccw.count != sizeof(status)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(status)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            address_space_stb(&address_space_memory, ccw.cda, vdev->status,
                              MEMTXATTRS_UNSPECIFIED, NULL);
            sch->curr_status.scsw.count = ccw.count - sizeof(vdev->status);
            ret = 0;
        }
        break;
    case CCW_CMD_WRITE_STATUS:
        if (check_len) {
            if (ccw.count != sizeof(status)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(status)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            ccw_dstream_read(&sch->cds, status);
            if (!(status & VIRTIO_CONFIG_S_DRIVER_OK)) {
                virtio_ccw_stop_ioeventfd(dev);
            }
            if (virtio_set_status(vdev, status) == 0) {
                if (vdev->status == 0) {
                    virtio_ccw_reset_virtio(dev, vdev);
                }
                if (status & VIRTIO_CONFIG_S_DRIVER_OK) {
                    virtio_ccw_start_ioeventfd(dev);
                }
                sch->curr_status.scsw.count = ccw.count - sizeof(status);
                ret = 0;
            } else {
                /* Trigger a command reject. */
                ret = -ENOSYS;
            }
        }
        break;
    case CCW_CMD_SET_IND:
        if (check_len) {
            if (ccw.count != sizeof(indicators)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(indicators)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (sch->thinint_active) {
            /* Trigger a command reject. */
            ret = -ENOSYS;
            break;
        }
        if (virtio_get_num_queues(vdev) > NR_CLASSIC_INDICATOR_BITS) {
            /* More queues than indicator bits --> trigger a reject */
            ret = -ENOSYS;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            ccw_dstream_read(&sch->cds, indicators);
            indicators = be64_to_cpu(indicators);
            dev->indicators = get_indicator(indicators, sizeof(uint64_t));
            sch->curr_status.scsw.count = ccw.count - sizeof(indicators);
            ret = 0;
        }
        break;
    case CCW_CMD_SET_CONF_IND:
        if (check_len) {
            if (ccw.count != sizeof(indicators)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(indicators)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            ccw_dstream_read(&sch->cds, indicators);
            indicators = be64_to_cpu(indicators);
            dev->indicators2 = get_indicator(indicators, sizeof(uint64_t));
            sch->curr_status.scsw.count = ccw.count - sizeof(indicators);
            ret = 0;
        }
        break;
    case CCW_CMD_READ_VQ_CONF:
        if (check_len) {
            if (ccw.count != sizeof(vq_config)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(vq_config)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            ccw_dstream_read(&sch->cds, vq_config.index);
            vq_config.index = be16_to_cpu(vq_config.index);
            if (vq_config.index >= VIRTIO_QUEUE_MAX) {
                ret = -EINVAL;
                break;
            }
            vq_config.num_max = virtio_queue_get_num(vdev,
                                                     vq_config.index);
            vq_config.num_max = cpu_to_be16(vq_config.num_max);
            ccw_dstream_write(&sch->cds, vq_config.num_max);
            sch->curr_status.scsw.count = ccw.count - sizeof(vq_config);
            ret = 0;
        }
        break;
    case CCW_CMD_SET_IND_ADAPTER:
        if (check_len) {
            if (ccw.count != sizeof(thinint)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(thinint)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else if (dev->indicators && !sch->thinint_active) {
            /* Trigger a command reject. */
            ret = -ENOSYS;
        } else {
            if (ccw_dstream_read(&sch->cds, thinint)) {
                ret = -EFAULT;
            } else {
                thinint.ind_bit = be64_to_cpu(thinint.ind_bit);
                thinint.summary_indicator =
                    be64_to_cpu(thinint.summary_indicator);
                thinint.device_indicator =
                    be64_to_cpu(thinint.device_indicator);

                dev->summary_indicator =
                    get_indicator(thinint.summary_indicator, sizeof(uint8_t));
                dev->indicators =
                    get_indicator(thinint.device_indicator,
                                  thinint.ind_bit / 8 + 1);
                dev->thinint_isc = thinint.isc;
                dev->routes.adapter.ind_offset = thinint.ind_bit;
                dev->routes.adapter.summary_offset = 7;
                dev->routes.adapter.adapter_id = css_get_adapter_id(
                                                 CSS_IO_ADAPTER_VIRTIO,
                                                 dev->thinint_isc);
                sch->thinint_active = ((dev->indicators != NULL) &&
                                       (dev->summary_indicator != NULL));
                sch->curr_status.scsw.count = ccw.count - sizeof(thinint);
                ret = 0;
            }
        }
        break;
    case CCW_CMD_SET_VIRTIO_REV:
        len = sizeof(revinfo);
        if (ccw.count < len) {
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
            break;
        }
        ccw_dstream_read_buf(&sch->cds, &revinfo, 4);
        revinfo.revision = be16_to_cpu(revinfo.revision);
        revinfo.length = be16_to_cpu(revinfo.length);
        if (ccw.count < len + revinfo.length ||
            (check_len && ccw.count > len + revinfo.length)) {
            ret = -EINVAL;
            break;
        }
        /*
         * Once we start to support revisions with additional data, we'll
         * need to fetch it here. Nothing to do for now, though.
         */
        if (dev->revision >= 0 ||
            revinfo.revision > virtio_ccw_rev_max(dev) ||
            (dev->force_revision_1 && !revinfo.revision)) {
            ret = -ENOSYS;
            break;
        }
        ret = 0;
        dev->revision = revinfo.revision;
        break;
    default:
        ret = -ENOSYS;
        break;
    }
    return ret;
}

static void virtio_sch_disable_cb(SubchDev *sch)
{
    VirtioCcwDevice *dev = sch->driver_data;

    dev->revision = -1;
}

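/*
 * Common realize path for all virtio-ccw proxy devices: create and wire up
 * the virtual subchannel, then hand off to the device-specific realize
 * callback and the parent CcwDevice realize.
 */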
static void virtio_ccw_device_realize(VirtioCcwDevice *dev, Error **errp)
{
    VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_GET_CLASS(dev);
    CcwDevice *ccw_dev = CCW_DEVICE(dev);
    CCWDeviceClass *ck = CCW_DEVICE_GET_CLASS(ccw_dev);
    SubchDev *sch;
    Error *err = NULL;

    sch = css_create_sch(ccw_dev->devno, errp);
    if (!sch) {
        return;
    }
    if (!virtio_ccw_rev_max(dev) && dev->force_revision_1) {
        error_setg(&err, "Invalid value of property max_rev "
                   "(is %d expected >= 1)", virtio_ccw_rev_max(dev));
        goto out_err;
    }

    sch->driver_data = dev;
    sch->ccw_cb = virtio_ccw_cb;
    sch->disable_cb = virtio_sch_disable_cb;
    sch->id.reserved = 0xff;
    sch->id.cu_type = VIRTIO_CCW_CU_TYPE;
    sch->do_subchannel_work = do_subchannel_work_virtual;
    ccw_dev->sch = sch;
    dev->indicators = NULL;
    dev->revision = -1;
    css_sch_build_virtual_schib(sch, 0, VIRTIO_CCW_CHPID_TYPE);

    trace_virtio_ccw_new_device(
        sch->cssid, sch->ssid, sch->schid, sch->devno,
        ccw_dev->devno.valid ? "user-configured" : "auto-configured");

    if (kvm_enabled() && !kvm_eventfds_enabled()) {
        dev->flags &= ~VIRTIO_CCW_FLAG_USE_IOEVENTFD;
    }

    if (k->realize) {
        k->realize(dev, &err);
        if (err) {
            goto out_err;
        }
    }

    ck->realize(ccw_dev, &err);
    if (err) {
        goto out_err;
    }

    return;

out_err:
    error_propagate(errp, err);
    css_subch_assign(sch->cssid, sch->ssid, sch->schid, sch->devno, NULL);
    ccw_dev->sch = NULL;
    g_free(sch);
}

static void virtio_ccw_device_unrealize(VirtioCcwDevice *dev, Error **errp)
{
    VirtIOCCWDeviceClass *dc = VIRTIO_CCW_DEVICE_GET_CLASS(dev);
    CcwDevice *ccw_dev = CCW_DEVICE(dev);
    SubchDev *sch = ccw_dev->sch;

    if (dc->unrealize) {
        dc->unrealize(dev, errp);
    }

    if (sch) {
        css_subch_assign(sch->cssid, sch->ssid, sch->schid, sch->devno, NULL);
        g_free(sch);
        ccw_dev->sch = NULL;
    }
    if (dev->indicators) {
        release_indicator(&dev->routes.adapter, dev->indicators);
        dev->indicators = NULL;
    }
}

/* DeviceState to VirtioCcwDevice. Note: used on datapath,
 * be careful and test performance if you change this.
 */
static inline VirtioCcwDevice *to_virtio_ccw_dev_fast(DeviceState *d)
{
    CcwDevice *ccw_dev = to_ccw_dev_fast(d);

    return container_of(ccw_dev, VirtioCcwDevice, parent_obj);
}

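/*
 * Atomically OR the given bits into the indicator byte at ind_loc in
 * guest memory and return the previous value (or -1 if the location
 * cannot be mapped).
 */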
static uint8_t virtio_set_ind_atomic(SubchDev *sch, uint64_t ind_loc,
                                     uint8_t to_be_set)
{
    uint8_t ind_old, ind_new;
    hwaddr len = 1;
    uint8_t *ind_addr;

    ind_addr = cpu_physical_memory_map(ind_loc, &len, 1);
    if (!ind_addr) {
        error_report("%s(%x.%x.%04x): unable to access indicator",
                     __func__, sch->cssid, sch->ssid, sch->schid);
        return -1;
    }
    do {
        ind_old = *ind_addr;
        ind_new = ind_old | to_be_set;
    } while (atomic_cmpxchg(ind_addr, ind_old, ind_new) != ind_old);
    trace_virtio_ccw_set_ind(ind_loc, ind_old, ind_new);
    cpu_physical_memory_unmap(ind_addr, len, 1, len);

    return ind_old;
}

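/*
 * Deliver a notification for the given vector to the guest: either set a
 * bit in the classic (or configuration change) indicators and raise an
 * I/O interrupt, or use the adapter interrupt path when thinint is active.
 */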
static void virtio_ccw_notify(DeviceState *d, uint16_t vector)
{
    VirtioCcwDevice *dev = to_virtio_ccw_dev_fast(d);
    CcwDevice *ccw_dev = to_ccw_dev_fast(d);
    SubchDev *sch = ccw_dev->sch;
    uint64_t indicators;

    if (vector == VIRTIO_NO_VECTOR) {
        return;
    }
    /*
     * vector < VIRTIO_QUEUE_MAX: notification for a virtqueue
     * vector == VIRTIO_QUEUE_MAX: configuration change notification
     * bits beyond that are unused and should never be notified for
     */
    assert(vector <= VIRTIO_QUEUE_MAX);

    if (vector < VIRTIO_QUEUE_MAX) {
        if (!dev->indicators) {
            return;
        }
        if (sch->thinint_active) {
            /*
             * In the adapter interrupt case, indicators points to a
             * memory area that may be (way) larger than 64 bit and
             * ind_bit indicates the start of the indicators in a big
             * endian notation.
             */
            uint64_t ind_bit = dev->routes.adapter.ind_offset;

            virtio_set_ind_atomic(sch, dev->indicators->addr +
                                  (ind_bit + vector) / 8,
                                  0x80 >> ((ind_bit + vector) % 8));
            if (!virtio_set_ind_atomic(sch, dev->summary_indicator->addr,
                                       0x01)) {
                css_adapter_interrupt(CSS_IO_ADAPTER_VIRTIO, dev->thinint_isc);
            }
        } else {
            assert(vector < NR_CLASSIC_INDICATOR_BITS);
            indicators = address_space_ldq(&address_space_memory,
                                           dev->indicators->addr,
                                           MEMTXATTRS_UNSPECIFIED,
                                           NULL);
            indicators |= 1ULL << vector;
            address_space_stq(&address_space_memory, dev->indicators->addr,
                              indicators, MEMTXATTRS_UNSPECIFIED, NULL);
            css_conditional_io_interrupt(sch);
        }
    } else {
        if (!dev->indicators2) {
            return;
        }
        indicators = address_space_ldq(&address_space_memory,
                                       dev->indicators2->addr,
                                       MEMTXATTRS_UNSPECIFIED,
                                       NULL);
        indicators |= 1ULL;
        address_space_stq(&address_space_memory, dev->indicators2->addr,
                          indicators, MEMTXATTRS_UNSPECIFIED, NULL);
        css_conditional_io_interrupt(sch);
    }
}

static void virtio_ccw_reset(DeviceState *d)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
    VirtIOCCWDeviceClass *vdc = VIRTIO_CCW_DEVICE_GET_CLASS(dev);

    virtio_ccw_reset_virtio(dev, vdev);
    if (vdc->parent_reset) {
        vdc->parent_reset(d);
    }
}

static void virtio_ccw_vmstate_change(DeviceState *d, bool running)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);

    if (running) {
        virtio_ccw_start_ioeventfd(dev);
    } else {
        virtio_ccw_stop_ioeventfd(dev);
    }
}

static bool virtio_ccw_query_guest_notifiers(DeviceState *d)
{
    CcwDevice *dev = CCW_DEVICE(d);

    return !!(dev->sch->curr_status.pmcw.flags & PMCW_FLAGS_MASK_ENA);
}

static int virtio_ccw_get_mappings(VirtioCcwDevice *dev)
{
    int r;
    CcwDevice *ccw_dev = CCW_DEVICE(dev);

    if (!ccw_dev->sch->thinint_active) {
        return -EINVAL;
    }

    r = map_indicator(&dev->routes.adapter, dev->summary_indicator);
    if (r) {
        return r;
    }
    r = map_indicator(&dev->routes.adapter, dev->indicators);
    if (r) {
        return r;
    }
    dev->routes.adapter.summary_addr = dev->summary_indicator->map;
    dev->routes.adapter.ind_addr = dev->indicators->map;

    return 0;
}

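/*
 * Register adapter interrupt routes (one per populated virtqueue) with the
 * FLIC so that irqfds can later be wired up for the guest notifiers.
 */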
static int virtio_ccw_setup_irqroutes(VirtioCcwDevice *dev, int nvqs)
{
    int i;
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
    int ret;
    S390FLICState *fs = s390_get_flic();
    S390FLICStateClass *fsc = s390_get_flic_class(fs);

    ret = virtio_ccw_get_mappings(dev);
    if (ret) {
        return ret;
    }
    for (i = 0; i < nvqs; i++) {
        if (!virtio_queue_get_num(vdev, i)) {
            break;
        }
    }
    dev->routes.num_routes = i;
    return fsc->add_adapter_routes(fs, &dev->routes);
}

static void virtio_ccw_release_irqroutes(VirtioCcwDevice *dev, int nvqs)
{
    S390FLICState *fs = s390_get_flic();
    S390FLICStateClass *fsc = s390_get_flic_class(fs);

    fsc->release_adapter_routes(fs, &dev->routes);
}

static int virtio_ccw_add_irqfd(VirtioCcwDevice *dev, int n)
{
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);

    return kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, notifier, NULL,
                                              dev->routes.gsi[n]);
}

static void virtio_ccw_remove_irqfd(VirtioCcwDevice *dev, int n)
{
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);
    int ret;

    ret = kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, notifier,
                                                dev->routes.gsi[n]);
    assert(ret == 0);
}

static int virtio_ccw_set_guest_notifier(VirtioCcwDevice *dev, int n,
                                         bool assign, bool with_irqfd)
{
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);

    if (assign) {
        int r = event_notifier_init(notifier, 0);

        if (r < 0) {
            return r;
        }
        virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd);
        if (with_irqfd) {
            r = virtio_ccw_add_irqfd(dev, n);
            if (r) {
                virtio_queue_set_guest_notifier_fd_handler(vq, false,
                                                           with_irqfd);
                return r;
            }
        }
        /*
         * We do not support individual masking for channel devices, so we
         * need to manually trigger any guest masking callbacks here.
         */
        if (k->guest_notifier_mask && vdev->use_guest_notifier_mask) {
            k->guest_notifier_mask(vdev, n, false);
        }
        /* get lost events and re-inject */
        if (k->guest_notifier_pending &&
            k->guest_notifier_pending(vdev, n)) {
            event_notifier_set(notifier);
        }
    } else {
        if (k->guest_notifier_mask && vdev->use_guest_notifier_mask) {
            k->guest_notifier_mask(vdev, n, true);
        }
        if (with_irqfd) {
            virtio_ccw_remove_irqfd(dev, n);
        }
        virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd);
        event_notifier_cleanup(notifier);
    }
    return 0;
}

static int virtio_ccw_set_guest_notifiers(DeviceState *d, int nvqs,
                                          bool assigned)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
    CcwDevice *ccw_dev = CCW_DEVICE(d);
    bool with_irqfd = ccw_dev->sch->thinint_active && kvm_irqfds_enabled();
    int r, n;

    if (with_irqfd && assigned) {
        /* irq routes need to be set up before assigning irqfds */
        r = virtio_ccw_setup_irqroutes(dev, nvqs);
        if (r < 0) {
            goto irqroute_error;
        }
    }
    for (n = 0; n < nvqs; n++) {
        if (!virtio_queue_get_num(vdev, n)) {
            break;
        }
        r = virtio_ccw_set_guest_notifier(dev, n, assigned, with_irqfd);
        if (r < 0) {
            goto assign_error;
        }
    }
    if (with_irqfd && !assigned) {
        /* release irq routes after irqfds have been released */
        virtio_ccw_release_irqroutes(dev, nvqs);
    }
    return 0;

assign_error:
    while (--n >= 0) {
        virtio_ccw_set_guest_notifier(dev, n, !assigned, false);
    }
irqroute_error:
    if (with_irqfd && assigned) {
        virtio_ccw_release_irqroutes(dev, nvqs);
    }
    return r;
}

static void virtio_ccw_save_queue(DeviceState *d, int n, QEMUFile *f)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);

    qemu_put_be16(f, virtio_queue_vector(vdev, n));
}

static int virtio_ccw_load_queue(DeviceState *d, int n, QEMUFile *f)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
    uint16_t vector;

    qemu_get_be16s(f, &vector);
    virtio_queue_set_vector(vdev, n, vector);

    return 0;
}

static void virtio_ccw_save_config(DeviceState *d, QEMUFile *f)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
    vmstate_save_state(f, &vmstate_virtio_ccw_dev, dev, NULL);
}

static int virtio_ccw_load_config(DeviceState *d, QEMUFile *f)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
    return vmstate_load_state(f, &vmstate_virtio_ccw_dev, dev, 1);
}

static void virtio_ccw_pre_plugged(DeviceState *d, Error **errp)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);

    if (dev->max_rev >= 1) {
        virtio_add_feature(&vdev->host_features, VIRTIO_F_VERSION_1);
    }
}

/* This is called by virtio-bus just after the device is plugged. */
static void virtio_ccw_device_plugged(DeviceState *d, Error **errp)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
    CcwDevice *ccw_dev = CCW_DEVICE(d);
    SubchDev *sch = ccw_dev->sch;
    int n = virtio_get_num_queues(vdev);
    S390FLICState *flic = s390_get_flic();

    if (!virtio_has_feature(vdev->host_features, VIRTIO_F_VERSION_1)) {
        dev->max_rev = 0;
    }

    if (virtio_get_num_queues(vdev) > VIRTIO_QUEUE_MAX) {
        error_setg(errp, "The number of virtqueues %d "
                   "exceeds virtio limit %d", n,
                   VIRTIO_QUEUE_MAX);
        return;
    }
    if (virtio_get_num_queues(vdev) > flic->adapter_routes_max_batch) {
        error_setg(errp, "The number of virtqueues %d "
                   "exceeds flic adapter route limit %d", n,
                   flic->adapter_routes_max_batch);
        return;
    }

    sch->id.cu_model = virtio_bus_get_vdev_id(&dev->bus);

    css_generate_sch_crws(sch->cssid, sch->ssid, sch->schid,
                          d->hotplugged, 1);
}

static void virtio_ccw_device_unplugged(DeviceState *d)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);

    virtio_ccw_stop_ioeventfd(dev);
}

/**************** Virtio-ccw Bus Device Descriptions *******************/

static void virtio_ccw_busdev_realize(DeviceState *dev, Error **errp)
{
    VirtioCcwDevice *_dev = (VirtioCcwDevice *)dev;

    virtio_ccw_bus_new(&_dev->bus, sizeof(_dev->bus), _dev);
    virtio_ccw_device_realize(_dev, errp);
}

static void virtio_ccw_busdev_unrealize(DeviceState *dev, Error **errp)
{
    VirtioCcwDevice *_dev = (VirtioCcwDevice *)dev;

    virtio_ccw_device_unrealize(_dev, errp);
}

static void virtio_ccw_busdev_unplug(HotplugHandler *hotplug_dev,
                                     DeviceState *dev, Error **errp)
{
    VirtioCcwDevice *_dev = to_virtio_ccw_dev_fast(dev);

    virtio_ccw_stop_ioeventfd(_dev);
}

static void virtio_ccw_device_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    CCWDeviceClass *k = CCW_DEVICE_CLASS(dc);
    VirtIOCCWDeviceClass *vdc = VIRTIO_CCW_DEVICE_CLASS(klass);

    k->unplug = virtio_ccw_busdev_unplug;
    dc->realize = virtio_ccw_busdev_realize;
    dc->unrealize = virtio_ccw_busdev_unrealize;
    dc->bus_type = TYPE_VIRTUAL_CSS_BUS;
    device_class_set_parent_reset(dc, virtio_ccw_reset, &vdc->parent_reset);
}

static const TypeInfo virtio_ccw_device_info = {
    .name = TYPE_VIRTIO_CCW_DEVICE,
    .parent = TYPE_CCW_DEVICE,
    .instance_size = sizeof(VirtioCcwDevice),
    .class_init = virtio_ccw_device_class_init,
    .class_size = sizeof(VirtIOCCWDeviceClass),
    .abstract = true,
};

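/*
 * Illustrative sketch (not part of this file): concrete virtio-ccw proxy
 * devices subclass the abstract TYPE_VIRTIO_CCW_DEVICE above and embed the
 * actual virtio device. The names below (VirtIONetCcw, VIRTIO_NET_CCW) are
 * drawn from the virtio-net proxy; roughly:
 *
 *     static void virtio_ccw_net_realize(VirtioCcwDevice *ccw_dev,
 *                                        Error **errp)
 *     {
 *         VirtIONetCcw *dev = VIRTIO_NET_CCW(ccw_dev);
 *         DeviceState *vdev = DEVICE(&dev->vdev);
 *
 *         qdev_set_parent_bus(vdev, BUS(&ccw_dev->bus));
 *         object_property_set_bool(OBJECT(vdev), true, "realized", errp);
 *     }
 *
 * The real proxy implementations live in the virtio-ccw-*.c files next to
 * this one.
 */
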
/* virtio-ccw-bus */

static void virtio_ccw_bus_new(VirtioBusState *bus, size_t bus_size,
                               VirtioCcwDevice *dev)
{
    DeviceState *qdev = DEVICE(dev);
    char virtio_bus_name[] = "virtio-bus";

    qbus_create_inplace(bus, bus_size, TYPE_VIRTIO_CCW_BUS,
                        qdev, virtio_bus_name);
}

static void virtio_ccw_bus_class_init(ObjectClass *klass, void *data)
{
    VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);
    BusClass *bus_class = BUS_CLASS(klass);

    bus_class->max_dev = 1;
    k->notify = virtio_ccw_notify;
    k->vmstate_change = virtio_ccw_vmstate_change;
    k->query_guest_notifiers = virtio_ccw_query_guest_notifiers;
    k->set_guest_notifiers = virtio_ccw_set_guest_notifiers;
    k->save_queue = virtio_ccw_save_queue;
    k->load_queue = virtio_ccw_load_queue;
    k->save_config = virtio_ccw_save_config;
    k->load_config = virtio_ccw_load_config;
    k->pre_plugged = virtio_ccw_pre_plugged;
    k->device_plugged = virtio_ccw_device_plugged;
    k->device_unplugged = virtio_ccw_device_unplugged;
    k->ioeventfd_enabled = virtio_ccw_ioeventfd_enabled;
    k->ioeventfd_assign = virtio_ccw_ioeventfd_assign;
}

static const TypeInfo virtio_ccw_bus_info = {
    .name = TYPE_VIRTIO_CCW_BUS,
    .parent = TYPE_VIRTIO_BUS,
    .instance_size = sizeof(VirtioCcwBusState),
    .class_init = virtio_ccw_bus_class_init,
};

static void virtio_ccw_register(void)
{
    type_register_static(&virtio_ccw_bus_info);
    type_register_static(&virtio_ccw_device_info);
}

type_init(virtio_ccw_register)