/* hw/vfio/ccw.c — QEMU vfio-ccw subchannel passthrough (mirrored source) */
1 /*
2 * vfio based subchannel assignment support
3 *
4 * Copyright 2017 IBM Corp.
5 * Copyright 2019 Red Hat, Inc.
6 *
7 * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
8 * Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
9 * Pierre Morel <pmorel@linux.vnet.ibm.com>
10 * Cornelia Huck <cohuck@redhat.com>
11 *
12 * This work is licensed under the terms of the GNU GPL, version 2 or (at
13 * your option) any later version. See the COPYING file in the top-level
14 * directory.
15 */
16
17 #include "qemu/osdep.h"
18 #include CONFIG_DEVICES /* CONFIG_IOMMUFD */
19 #include <linux/vfio.h>
20 #include <linux/vfio_ccw.h>
21 #include <sys/ioctl.h>
22
23 #include "qapi/error.h"
24 #include "hw/vfio/vfio-common.h"
25 #include "sysemu/iommufd.h"
26 #include "hw/s390x/s390-ccw.h"
27 #include "hw/s390x/vfio-ccw.h"
28 #include "hw/qdev-properties.h"
29 #include "hw/s390x/ccw-device.h"
30 #include "exec/address-spaces.h"
31 #include "qemu/error-report.h"
32 #include "qemu/main-loop.h"
33 #include "qemu/module.h"
34
/*
 * Per-device state: the emulated subchannel (cdev) plus the vfio device
 * handle and the kernel-provided regions.  Regions are not mmap'ed; they
 * are accessed with pread/pwrite on the device fd at the recorded offset.
 */
struct VFIOCCWDevice {
    S390CCWDevice cdev;
    VFIODevice vdev;
    /* Mandatory I/O request region, used by vfio_ccw_handle_request(). */
    uint64_t io_region_size;
    uint64_t io_region_offset;
    struct ccw_io_region *io_region;
    /* Optional async command region (csch/hsch); pointer NULL if absent. */
    uint64_t async_cmd_region_size;
    uint64_t async_cmd_region_offset;
    struct ccw_cmd_region *async_cmd_region;
    /* Optional schib region for path state (stsch); NULL if absent. */
    uint64_t schib_region_size;
    uint64_t schib_region_offset;
    struct ccw_schib_region *schib_region;
    /* Optional channel report word region; NULL if absent. */
    uint64_t crw_region_size;
    uint64_t crw_region_offset;
    struct ccw_crw_region *crw_region;
    /* Eventfd-backed notifiers matching the device's IRQ indexes. */
    EventNotifier io_notifier;
    EventNotifier crw_notifier;
    EventNotifier req_notifier;
    bool force_orb_pfch;   /* property: force the ORB PFCH bit on requests */
    bool warned_orb_pfch;  /* latched once by warn_once_pfch() */
};
56
57 static inline void warn_once_pfch(VFIOCCWDevice *vcdev, SubchDev *sch,
58 const char *msg)
59 {
60 warn_report_once_cond(&vcdev->warned_orb_pfch,
61 "vfio-ccw (devno %x.%x.%04x): %s",
62 sch->cssid, sch->ssid, sch->devno, msg);
63 }
64
/*
 * VFIODeviceOps callback: a vfio-ccw device never requests a hot
 * reset, so the flag is unconditionally cleared.
 */
static void vfio_ccw_compute_needs_reset(VFIODevice *vdev)
{
    vdev->needs_reset = false;
}
69
/*
 * We don't need vfio_hot_reset_multi and vfio_eoi operations for
 * vfio_ccw device now.  Only the needs_reset computation is provided;
 * the remaining callbacks stay NULL.
 */
struct VFIODeviceOps vfio_ccw_ops = {
    .vfio_compute_needs_reset = vfio_ccw_compute_needs_reset,
};
77
/*
 * Forward a start-subchannel request to the vfio-ccw kernel driver by
 * writing the guest's ORB and SCSW into the I/O region.  The returned
 * IOInstEnding tells the css layer which condition code to surface.
 */
static IOInstEnding vfio_ccw_handle_request(SubchDev *sch)
{
    VFIOCCWDevice *vcdev = VFIO_CCW(sch->driver_data);
    struct ccw_io_region *region = vcdev->io_region;
    int ret;

    /*
     * Optionally force the prefetch bit in the ORB (warning once per
     * device) for hosts that require it; controlled by the
     * "force-orb-pfch" property.
     */
    if (!(sch->orb.ctrl0 & ORB_CTRL0_MASK_PFCH) && vcdev->force_orb_pfch) {
        sch->orb.ctrl0 |= ORB_CTRL0_MASK_PFCH;
        warn_once_pfch(vcdev, sch, "PFCH flag forced");
    }

    /* The region's scratch areas must match the QEMU-side struct sizes. */
    QEMU_BUILD_BUG_ON(sizeof(region->orb_area) != sizeof(ORB));
    QEMU_BUILD_BUG_ON(sizeof(region->scsw_area) != sizeof(SCSW));
    QEMU_BUILD_BUG_ON(sizeof(region->irb_area) != sizeof(IRB));

    memset(region, 0, sizeof(*region));

    memcpy(region->orb_area, &sch->orb, sizeof(ORB));
    memcpy(region->scsw_area, &sch->curr_status.scsw, sizeof(SCSW));

again:
    /* Writing the region submits the request to the kernel driver. */
    ret = pwrite(vcdev->vdev.fd, region,
                 vcdev->io_region_size, vcdev->io_region_offset);
    if (ret != vcdev->io_region_size) {
        /*
         * NOTE(review): errno is only defined when ret == -1; a short
         * positive write would consult stale errno here.  Presumed not
         * to happen with this kernel interface — confirm.
         */
        if (errno == EAGAIN) {
            goto again;
        }
        error_report("vfio-ccw: write I/O region failed with errno=%d", errno);
        ret = errno ? -errno : -EFAULT;
    } else {
        ret = 0;
    }
    /* Map the driver's result onto architected condition codes. */
    switch (ret) {
    case 0:
        return IOINST_CC_EXPECTED;
    case -EBUSY:
        return IOINST_CC_BUSY;
    case -ENODEV:
    case -EACCES:
        return IOINST_CC_NOT_OPERATIONAL;
    case -EFAULT:
    default:
        /* Anything else: reflect a unit exception back to the guest. */
        sch_gen_unit_exception(sch);
        css_inject_io_interrupt(sch);
        return IOINST_CC_EXPECTED;
    }
}
125
/*
 * Handle a store-subchannel request: if the optional schib region is
 * available, read fresh path state from the kernel and merge only the
 * path-related bits into the emulated SCHIB before the store completes.
 */
static IOInstEnding vfio_ccw_handle_store(SubchDev *sch)
{
    VFIOCCWDevice *vcdev = VFIO_CCW(sch->driver_data);
    SCHIB *schib = &sch->curr_status;
    struct ccw_schib_region *region = vcdev->schib_region;
    SCHIB *s;
    int ret;

    /* schib region not available so nothing else to do */
    if (!region) {
        return IOINST_CC_EXPECTED;
    }

    memset(region, 0, sizeof(*region));
    /*
     * NOTE(review): only ret == -1 is treated as failure; a short read
     * is presumed not to happen on this kernel interface — confirm.
     */
    ret = pread(vcdev->vdev.fd, region, vcdev->schib_region_size,
                vcdev->schib_region_offset);

    if (ret == -1) {
        /*
         * Device is probably damaged, but store subchannel does not
         * have a nonzero cc defined for this scenario. Log an error,
         * and presume things are otherwise fine.
         */
        error_report("vfio-ccw: store region read failed with errno=%d", errno);
        return IOINST_CC_EXPECTED;
    }

    /*
     * Selectively copy path-related bits of the SCHIB,
     * rather than copying the entire struct.
     */
    s = (SCHIB *)region->schib_area;
    schib->pmcw.pnom = s->pmcw.pnom;
    schib->pmcw.lpum = s->pmcw.lpum;
    schib->pmcw.pam = s->pmcw.pam;
    schib->pmcw.pom = s->pmcw.pom;

    /* Propagate the path-not-operational flag when the host reports it. */
    if (s->scsw.flags & SCSW_FLAGS_MASK_PNO) {
        schib->scsw.flags |= SCSW_FLAGS_MASK_PNO;
    }

    return IOINST_CC_EXPECTED;
}
169
170 static int vfio_ccw_handle_clear(SubchDev *sch)
171 {
172 VFIOCCWDevice *vcdev = VFIO_CCW(sch->driver_data);
173 struct ccw_cmd_region *region = vcdev->async_cmd_region;
174 int ret;
175
176 if (!vcdev->async_cmd_region) {
177 /* Async command region not available, fall back to emulation */
178 return -ENOSYS;
179 }
180
181 memset(region, 0, sizeof(*region));
182 region->command = VFIO_CCW_ASYNC_CMD_CSCH;
183
184 again:
185 ret = pwrite(vcdev->vdev.fd, region,
186 vcdev->async_cmd_region_size, vcdev->async_cmd_region_offset);
187 if (ret != vcdev->async_cmd_region_size) {
188 if (errno == EAGAIN) {
189 goto again;
190 }
191 error_report("vfio-ccw: write cmd region failed with errno=%d", errno);
192 ret = errno ? -errno : -EFAULT;
193 } else {
194 ret = 0;
195 }
196 switch (ret) {
197 case 0:
198 case -ENODEV:
199 case -EACCES:
200 return ret;
201 case -EFAULT:
202 default:
203 sch_gen_unit_exception(sch);
204 css_inject_io_interrupt(sch);
205 return 0;
206 }
207 }
208
209 static int vfio_ccw_handle_halt(SubchDev *sch)
210 {
211 VFIOCCWDevice *vcdev = VFIO_CCW(sch->driver_data);
212 struct ccw_cmd_region *region = vcdev->async_cmd_region;
213 int ret;
214
215 if (!vcdev->async_cmd_region) {
216 /* Async command region not available, fall back to emulation */
217 return -ENOSYS;
218 }
219
220 memset(region, 0, sizeof(*region));
221 region->command = VFIO_CCW_ASYNC_CMD_HSCH;
222
223 again:
224 ret = pwrite(vcdev->vdev.fd, region,
225 vcdev->async_cmd_region_size, vcdev->async_cmd_region_offset);
226 if (ret != vcdev->async_cmd_region_size) {
227 if (errno == EAGAIN) {
228 goto again;
229 }
230 error_report("vfio-ccw: write cmd region failed with errno=%d", errno);
231 ret = errno ? -errno : -EFAULT;
232 } else {
233 ret = 0;
234 }
235 switch (ret) {
236 case 0:
237 case -EBUSY:
238 case -ENODEV:
239 case -EACCES:
240 return ret;
241 case -EFAULT:
242 default:
243 sch_gen_unit_exception(sch);
244 css_inject_io_interrupt(sch);
245 return 0;
246 }
247 }
248
/* DeviceClass reset handler: ask the kernel to reset the vfio device. */
static void vfio_ccw_reset(DeviceState *dev)
{
    VFIOCCWDevice *vcdev = VFIO_CCW(dev);

    /* The return value is ignored; there is no recovery path here. */
    ioctl(vcdev->vdev.fd, VFIO_DEVICE_RESET);
}
255
256 static void vfio_ccw_crw_read(VFIOCCWDevice *vcdev)
257 {
258 struct ccw_crw_region *region = vcdev->crw_region;
259 CRW crw;
260 int size;
261
262 /* Keep reading CRWs as long as data is returned */
263 do {
264 memset(region, 0, sizeof(*region));
265 size = pread(vcdev->vdev.fd, region, vcdev->crw_region_size,
266 vcdev->crw_region_offset);
267
268 if (size == -1) {
269 error_report("vfio-ccw: Read crw region failed with errno=%d",
270 errno);
271 break;
272 }
273
274 if (region->crw == 0) {
275 /* No more CRWs to queue */
276 break;
277 }
278
279 memcpy(&crw, &region->crw, sizeof(CRW));
280
281 css_crw_add_to_queue(crw);
282 } while (1);
283 }
284
285 static void vfio_ccw_req_notifier_handler(void *opaque)
286 {
287 VFIOCCWDevice *vcdev = opaque;
288 Error *err = NULL;
289
290 if (!event_notifier_test_and_clear(&vcdev->req_notifier)) {
291 return;
292 }
293
294 qdev_unplug(DEVICE(vcdev), &err);
295 if (err) {
296 warn_reportf_err(err, VFIO_MSG_PREFIX, vcdev->vdev.name);
297 }
298 }
299
300 static void vfio_ccw_crw_notifier_handler(void *opaque)
301 {
302 VFIOCCWDevice *vcdev = opaque;
303
304 while (event_notifier_test_and_clear(&vcdev->crw_notifier)) {
305 vfio_ccw_crw_read(vcdev);
306 }
307 }
308
/*
 * Handler for the I/O IRQ: the kernel signaled completion of a request.
 * Read the IRB back from the I/O region, fold it into the emulated
 * subchannel status, and inject an I/O interrupt into the guest.  Read
 * failures are mapped onto architected error conditions first.
 */
static void vfio_ccw_io_notifier_handler(void *opaque)
{
    VFIOCCWDevice *vcdev = opaque;
    struct ccw_io_region *region = vcdev->io_region;
    CcwDevice *ccw_dev = CCW_DEVICE(vcdev);
    SubchDev *sch = ccw_dev->sch;
    SCHIB *schib = &sch->curr_status;
    SCSW s;
    IRB irb;
    ESW esw;
    int size;

    if (!event_notifier_test_and_clear(&vcdev->io_notifier)) {
        return;
    }

    size = pread(vcdev->vdev.fd, region, vcdev->io_region_size,
                 vcdev->io_region_offset);
    if (size == -1) {
        switch (errno) {
        case ENODEV:
            /* Generate a deferred cc 3 condition. */
            schib->scsw.flags |= SCSW_FLAGS_MASK_CC;
            schib->scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL;
            schib->scsw.ctrl |= (SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND);
            goto read_err;
        case EFAULT:
            /* Memory problem, generate channel data check. */
            schib->scsw.ctrl &= ~SCSW_ACTL_START_PEND;
            schib->scsw.cstat = SCSW_CSTAT_DATA_CHECK;
            schib->scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL;
            schib->scsw.ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY |
                       SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
            goto read_err;
        default:
            /* Error, generate channel program check. */
            schib->scsw.ctrl &= ~SCSW_ACTL_START_PEND;
            schib->scsw.cstat = SCSW_CSTAT_PROG_CHECK;
            schib->scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL;
            schib->scsw.ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY |
                       SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
            goto read_err;
        }
    } else if (size != vcdev->io_region_size) {
        /* Information transfer error, generate channel-control check. */
        schib->scsw.ctrl &= ~SCSW_ACTL_START_PEND;
        schib->scsw.cstat = SCSW_CSTAT_CHN_CTRL_CHK;
        schib->scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL;
        schib->scsw.ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY |
                   SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
        goto read_err;
    }

    memcpy(&irb, region->irb_area, sizeof(IRB));

    /* Update control block via irb. */
    s = schib->scsw;
    copy_scsw_to_guest(&s, &irb.scsw);
    schib->scsw = s;

    copy_esw_to_guest(&esw, &irb.esw);
    sch->esw = esw;

    /* If a unit check is pending, copy sense data. */
    if ((schib->scsw.dstat & SCSW_DSTAT_UNIT_CHECK) &&
        (schib->pmcw.chars & PMCW_CHARS_MASK_CSENSE)) {
        memcpy(sch->sense_data, irb.ecw, sizeof(irb.ecw));
    }

read_err:
    /* Deliver either the real IRB or the synthesized error status. */
    css_inject_io_interrupt(sch);
}
381
/*
 * Register one of the device's eventfd-based IRQs (I/O, CRW or REQ):
 * create an event notifier, install its fd handler in the main loop,
 * and tell the kernel to signal that fd.  On failure errp is set and
 * nothing is left installed.
 */
static void vfio_ccw_register_irq_notifier(VFIOCCWDevice *vcdev,
                                           unsigned int irq,
                                           Error **errp)
{
    VFIODevice *vdev = &vcdev->vdev;
    struct vfio_irq_info *irq_info;
    size_t argsz;
    int fd;
    EventNotifier *notifier;
    IOHandler *fd_read;

    /* Pick the notifier/handler pair for the requested IRQ index. */
    switch (irq) {
    case VFIO_CCW_IO_IRQ_INDEX:
        notifier = &vcdev->io_notifier;
        fd_read = vfio_ccw_io_notifier_handler;
        break;
    case VFIO_CCW_CRW_IRQ_INDEX:
        notifier = &vcdev->crw_notifier;
        fd_read = vfio_ccw_crw_notifier_handler;
        break;
    case VFIO_CCW_REQ_IRQ_INDEX:
        notifier = &vcdev->req_notifier;
        fd_read = vfio_ccw_req_notifier_handler;
        break;
    default:
        error_setg(errp, "vfio: Unsupported device irq(%d)", irq);
        return;
    }

    /* The device must actually implement this IRQ index. */
    if (vdev->num_irqs < irq + 1) {
        error_setg(errp, "vfio: IRQ %u not available (number of irqs %u)",
                   irq, vdev->num_irqs);
        return;
    }

    /* Query the kernel to confirm the IRQ exists and has a vector. */
    argsz = sizeof(*irq_info);
    irq_info = g_malloc0(argsz);
    irq_info->index = irq;
    irq_info->argsz = argsz;
    if (ioctl(vdev->fd, VFIO_DEVICE_GET_IRQ_INFO,
              irq_info) < 0 || irq_info->count < 1) {
        error_setg_errno(errp, errno, "vfio: Error getting irq info");
        goto out_free_info;
    }

    if (event_notifier_init(notifier, 0)) {
        error_setg_errno(errp, errno,
                         "vfio: Unable to init event notifier for irq (%d)",
                         irq);
        goto out_free_info;
    }

    /* Route the eventfd into QEMU's main loop. */
    fd = event_notifier_get_fd(notifier);
    qemu_set_fd_handler(fd, fd_read, NULL, vcdev);

    /* Ask the kernel to trigger this eventfd; undo setup on failure. */
    if (vfio_set_irq_signaling(vdev, irq, 0,
                               VFIO_IRQ_SET_ACTION_TRIGGER, fd, errp)) {
        qemu_set_fd_handler(fd, NULL, NULL, vcdev);
        event_notifier_cleanup(notifier);
    }

out_free_info:
    g_free(irq_info);
}
446
447 static void vfio_ccw_unregister_irq_notifier(VFIOCCWDevice *vcdev,
448 unsigned int irq)
449 {
450 Error *err = NULL;
451 EventNotifier *notifier;
452
453 switch (irq) {
454 case VFIO_CCW_IO_IRQ_INDEX:
455 notifier = &vcdev->io_notifier;
456 break;
457 case VFIO_CCW_CRW_IRQ_INDEX:
458 notifier = &vcdev->crw_notifier;
459 break;
460 case VFIO_CCW_REQ_IRQ_INDEX:
461 notifier = &vcdev->req_notifier;
462 break;
463 default:
464 error_report("vfio: Unsupported device irq(%d)", irq);
465 return;
466 }
467
468 if (vfio_set_irq_signaling(&vcdev->vdev, irq, 0,
469 VFIO_IRQ_SET_ACTION_TRIGGER, -1, &err)) {
470 warn_reportf_err(err, VFIO_MSG_PREFIX, vcdev->vdev.name);
471 }
472
473 qemu_set_fd_handler(event_notifier_get_fd(notifier),
474 NULL, NULL, vcdev);
475 event_notifier_cleanup(notifier);
476 }
477
/*
 * Discover and allocate the device regions.  The I/O region is
 * mandatory; the async command, schib and crw regions are optional
 * capabilities (their pointers stay NULL when absent).  Each region's
 * reported size must match the corresponding QEMU structure.  On any
 * failure errp is set and everything allocated so far is freed.
 */
static void vfio_ccw_get_region(VFIOCCWDevice *vcdev, Error **errp)
{
    VFIODevice *vdev = &vcdev->vdev;
    struct vfio_region_info *info;
    int ret;

    /* Sanity check device */
    if (!(vdev->flags & VFIO_DEVICE_FLAGS_CCW)) {
        error_setg(errp, "vfio: Um, this isn't a vfio-ccw device");
        return;
    }

    /*
     * We always expect at least the I/O region to be present. We also
     * may have a variable number of regions governed by capabilities.
     */
    if (vdev->num_regions < VFIO_CCW_CONFIG_REGION_INDEX + 1) {
        error_setg(errp, "vfio: too few regions (%u), expected at least %u",
                   vdev->num_regions, VFIO_CCW_CONFIG_REGION_INDEX + 1);
        return;
    }

    ret = vfio_get_region_info(vdev, VFIO_CCW_CONFIG_REGION_INDEX, &info);
    if (ret) {
        error_setg_errno(errp, -ret, "vfio: Error getting config info");
        return;
    }

    vcdev->io_region_size = info->size;
    if (sizeof(*vcdev->io_region) != vcdev->io_region_size) {
        error_setg(errp, "vfio: Unexpected size of the I/O region");
        goto out_err;
    }

    vcdev->io_region_offset = info->offset;
    vcdev->io_region = g_malloc0(info->size);
    /* info is freed (and later reused) after each successful lookup. */
    g_free(info);

    /* check for the optional async command region */
    ret = vfio_get_dev_region_info(vdev, VFIO_REGION_TYPE_CCW,
                                   VFIO_REGION_SUBTYPE_CCW_ASYNC_CMD, &info);
    if (!ret) {
        vcdev->async_cmd_region_size = info->size;
        if (sizeof(*vcdev->async_cmd_region) != vcdev->async_cmd_region_size) {
            error_setg(errp, "vfio: Unexpected size of the async cmd region");
            goto out_err;
        }
        vcdev->async_cmd_region_offset = info->offset;
        vcdev->async_cmd_region = g_malloc0(info->size);
        g_free(info);
    }

    /* Optional schib region (path state for stsch). */
    ret = vfio_get_dev_region_info(vdev, VFIO_REGION_TYPE_CCW,
                                   VFIO_REGION_SUBTYPE_CCW_SCHIB, &info);
    if (!ret) {
        vcdev->schib_region_size = info->size;
        if (sizeof(*vcdev->schib_region) != vcdev->schib_region_size) {
            error_setg(errp, "vfio: Unexpected size of the schib region");
            goto out_err;
        }
        vcdev->schib_region_offset = info->offset;
        /* Not zeroed here; readers memset the buffer before each pread. */
        vcdev->schib_region = g_malloc(info->size);
        g_free(info);
    }

    /* Optional channel report word region. */
    ret = vfio_get_dev_region_info(vdev, VFIO_REGION_TYPE_CCW,
                                   VFIO_REGION_SUBTYPE_CCW_CRW, &info);

    if (!ret) {
        vcdev->crw_region_size = info->size;
        if (sizeof(*vcdev->crw_region) != vcdev->crw_region_size) {
            error_setg(errp, "vfio: Unexpected size of the CRW region");
            goto out_err;
        }
        vcdev->crw_region_offset = info->offset;
        vcdev->crw_region = g_malloc(info->size);
        g_free(info);
    }

    return;

out_err:
    /* Free whatever was set up; g_free(NULL) is a no-op. */
    g_free(vcdev->crw_region);
    g_free(vcdev->schib_region);
    g_free(vcdev->async_cmd_region);
    g_free(vcdev->io_region);
    /* info from the lookup that just failed its size check. */
    g_free(info);
    return;
}
567
568 static void vfio_ccw_put_region(VFIOCCWDevice *vcdev)
569 {
570 g_free(vcdev->crw_region);
571 g_free(vcdev->schib_region);
572 g_free(vcdev->async_cmd_region);
573 g_free(vcdev->io_region);
574 }
575
/*
 * Realize: realize the subchannel, attach the vfio device, map its
 * regions and register the IRQ notifiers.  The error labels at the
 * bottom unwind each step in reverse order of setup.
 */
static void vfio_ccw_realize(DeviceState *dev, Error **errp)
{
    S390CCWDevice *cdev = S390_CCW_DEVICE(dev);
    VFIOCCWDevice *vcdev = VFIO_CCW(cdev);
    S390CCWDeviceClass *cdc = S390_CCW_DEVICE_GET_CLASS(cdev);
    VFIODevice *vbasedev = &vcdev->vdev;
    Error *err = NULL;
    int ret;

    /* Call the class init function for subchannel. */
    if (cdc->realize) {
        cdc->realize(cdev, vcdev->vdev.sysfsdev, &err);
        if (err) {
            goto out_err_propagate;
        }
    }

    if (vfio_device_get_name(vbasedev, errp) < 0) {
        return;
    }

    ret = vfio_attach_device(cdev->mdevid, vbasedev,
                             &address_space_memory, errp);
    if (ret) {
        goto out_attach_dev_err;
    }

    vfio_ccw_get_region(vcdev, &err);
    if (err) {
        goto out_region_err;
    }

    /* The I/O IRQ is mandatory for operation. */
    vfio_ccw_register_irq_notifier(vcdev, VFIO_CCW_IO_IRQ_INDEX, &err);
    if (err) {
        goto out_io_notifier_err;
    }

    /* CRW notifications only make sense when the CRW region exists. */
    if (vcdev->crw_region) {
        vfio_ccw_register_irq_notifier(vcdev, VFIO_CCW_CRW_IRQ_INDEX, &err);
        if (err) {
            goto out_irq_notifier_err;
        }
    }

    vfio_ccw_register_irq_notifier(vcdev, VFIO_CCW_REQ_IRQ_INDEX, &err);
    if (err) {
        /*
         * Report this error, but do not make it a failing condition.
         * Lack of this IRQ in the host does not prevent normal operation.
         */
        error_report_err(err);
    }

    return;

out_irq_notifier_err:
    vfio_ccw_unregister_irq_notifier(vcdev, VFIO_CCW_REQ_IRQ_INDEX);
    vfio_ccw_unregister_irq_notifier(vcdev, VFIO_CCW_CRW_IRQ_INDEX);
    vfio_ccw_unregister_irq_notifier(vcdev, VFIO_CCW_IO_IRQ_INDEX);
out_io_notifier_err:
    vfio_ccw_put_region(vcdev);
out_region_err:
    vfio_detach_device(vbasedev);
out_attach_dev_err:
    g_free(vbasedev->name);
    if (cdc->unrealize) {
        cdc->unrealize(cdev);
    }
out_err_propagate:
    error_propagate(errp, err);
}
647
/* Unrealize: tear everything down in reverse order of vfio_ccw_realize(). */
static void vfio_ccw_unrealize(DeviceState *dev)
{
    S390CCWDevice *cdev = S390_CCW_DEVICE(dev);
    VFIOCCWDevice *vcdev = VFIO_CCW(cdev);
    S390CCWDeviceClass *cdc = S390_CCW_DEVICE_GET_CLASS(cdev);

    vfio_ccw_unregister_irq_notifier(vcdev, VFIO_CCW_REQ_IRQ_INDEX);
    vfio_ccw_unregister_irq_notifier(vcdev, VFIO_CCW_CRW_IRQ_INDEX);
    vfio_ccw_unregister_irq_notifier(vcdev, VFIO_CCW_IO_IRQ_INDEX);
    vfio_ccw_put_region(vcdev);
    vfio_detach_device(&vcdev->vdev);
    g_free(vcdev->vdev.name);

    /* Let the subchannel class undo its part of realize. */
    if (cdc->unrealize) {
        cdc->unrealize(cdev);
    }
}
665
/* qdev properties of the vfio-ccw device. */
static Property vfio_ccw_properties[] = {
    /* Host sysfs path of the mediated device to attach. */
    DEFINE_PROP_STRING("sysfsdev", VFIOCCWDevice, vdev.sysfsdev),
    /* Force the ORB PFCH bit on guest requests (warns once when applied). */
    DEFINE_PROP_BOOL("force-orb-pfch", VFIOCCWDevice, force_orb_pfch, false),
#ifdef CONFIG_IOMMUFD
    /* Optional iommufd backend to use for this device. */
    DEFINE_PROP_LINK("iommufd", VFIOCCWDevice, vdev.iommufd,
                     TYPE_IOMMUFD_BACKEND, IOMMUFDBackend *),
#endif
    DEFINE_PROP_END_OF_LIST(),
};

/* Migration of passed-through subchannels is not supported. */
static const VMStateDescription vfio_ccw_vmstate = {
    .name = "vfio-ccw",
    .unmigratable = 1,
};
680
/* Instance init: prepare the embedded VFIODevice before realize runs. */
static void vfio_ccw_instance_init(Object *obj)
{
    VFIOCCWDevice *vcdev = VFIO_CCW(obj);
    VFIODevice *vbasedev = &vcdev->vdev;

    /*
     * All vfio-ccw devices are believed to operate in a way compatible with
     * discarding of memory in RAM blocks, ie. pages pinned in the host are
     * in the current working set of the guest driver and therefore never
     * overlap e.g., with pages available to the guest balloon driver. This
     * needs to be set before vfio_get_device() for vfio common to handle
     * ram_block_discard_disable().
     */
    vfio_device_init(vbasedev, VFIO_DEVICE_TYPE_CCW, &vfio_ccw_ops,
                     DEVICE(vcdev), true);
}
697
#ifdef CONFIG_IOMMUFD
/* Setter for the "fd" property: use a pre-opened vfio device fd. */
static void vfio_ccw_set_fd(Object *obj, const char *str, Error **errp)
{
    vfio_device_set_fd(&VFIO_CCW(obj)->vdev, str, errp);
}
#endif
704
705 static void vfio_ccw_class_init(ObjectClass *klass, void *data)
706 {
707 DeviceClass *dc = DEVICE_CLASS(klass);
708 S390CCWDeviceClass *cdc = S390_CCW_DEVICE_CLASS(klass);
709
710 device_class_set_props(dc, vfio_ccw_properties);
711 #ifdef CONFIG_IOMMUFD
712 object_class_property_add_str(klass, "fd", NULL, vfio_ccw_set_fd);
713 #endif
714 dc->vmsd = &vfio_ccw_vmstate;
715 dc->desc = "VFIO-based subchannel assignment";
716 set_bit(DEVICE_CATEGORY_MISC, dc->categories);
717 dc->realize = vfio_ccw_realize;
718 dc->unrealize = vfio_ccw_unrealize;
719 dc->reset = vfio_ccw_reset;
720
721 cdc->handle_request = vfio_ccw_handle_request;
722 cdc->handle_halt = vfio_ccw_handle_halt;
723 cdc->handle_clear = vfio_ccw_handle_clear;
724 cdc->handle_store = vfio_ccw_handle_store;
725 }
726
/* QOM type registration for the vfio-ccw device. */
static const TypeInfo vfio_ccw_info = {
    .name = TYPE_VFIO_CCW,
    .parent = TYPE_S390_CCW,
    .instance_size = sizeof(VFIOCCWDevice),
    .instance_init = vfio_ccw_instance_init,
    .class_init = vfio_ccw_class_init,
};

static void register_vfio_ccw_type(void)
{
    type_register_static(&vfio_ccw_info);
}

type_init(register_vfio_ccw_type)