1 /*
2 * vfio based subchannel assignment support
3 *
4 * Copyright 2017 IBM Corp.
5 * Copyright 2019 Red Hat, Inc.
6 *
7 * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
8 * Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
9 * Pierre Morel <pmorel@linux.vnet.ibm.com>
10 * Cornelia Huck <cohuck@redhat.com>
11 *
12 * This work is licensed under the terms of the GNU GPL, version 2 or (at
13 * your option) any later version. See the COPYING file in the top-level
14 * directory.
15 */
16
17 #include "qemu/osdep.h"
18 #include CONFIG_DEVICES /* CONFIG_IOMMUFD */
19 #include <linux/vfio.h>
20 #include <linux/vfio_ccw.h>
21 #include <sys/ioctl.h>
22
23 #include "qapi/error.h"
24 #include "hw/vfio/vfio-common.h"
25 #include "sysemu/iommufd.h"
26 #include "hw/s390x/s390-ccw.h"
27 #include "hw/s390x/vfio-ccw.h"
28 #include "hw/qdev-properties.h"
29 #include "hw/s390x/ccw-device.h"
30 #include "exec/address-spaces.h"
31 #include "qemu/error-report.h"
32 #include "qemu/main-loop.h"
33 #include "qemu/module.h"
34
struct VFIOCCWDevice {
    S390CCWDevice cdev;
    VFIODevice vdev;
    /* Mandatory I/O region: carries SSCH requests in, IRB results out. */
    uint64_t io_region_size;
    uint64_t io_region_offset;
    struct ccw_io_region *io_region;
    /* Optional async command region (CSCH/HSCH); NULL when unsupported. */
    uint64_t async_cmd_region_size;
    uint64_t async_cmd_region_offset;
    struct ccw_cmd_region *async_cmd_region;
    /* Optional SCHIB region for store subchannel; NULL when unsupported. */
    uint64_t schib_region_size;
    uint64_t schib_region_offset;
    struct ccw_schib_region *schib_region;
    /* Optional CRW region; NULL when unsupported. */
    uint64_t crw_region_size;
    uint64_t crw_region_offset;
    struct ccw_crw_region *crw_region;
    /* Eventfds signalled by the host for the corresponding IRQ index. */
    EventNotifier io_notifier;
    EventNotifier crw_notifier;
    EventNotifier req_notifier;
    /* "force-orb-pfch" property; warned_orb_pfch latches the one-time warning. */
    bool force_orb_pfch;
    bool warned_orb_pfch;
};
56
/*
 * Report a prefetch-related warning for this device at most once;
 * vcdev->warned_orb_pfch latches after the first report.
 */
static inline void warn_once_pfch(VFIOCCWDevice *vcdev, SubchDev *sch,
                                  const char *msg)
{
    warn_report_once_cond(&vcdev->warned_orb_pfch,
                          "vfio-ccw (devno %x.%x.%04x): %s",
                          sch->cssid, sch->ssid, sch->devno, msg);
}
64
/* vfio-ccw devices never require a hot reset from the common vfio code. */
static void vfio_ccw_compute_needs_reset(VFIODevice *vdev)
{
    vdev->needs_reset = false;
}
69
70 /*
71 * We don't need vfio_hot_reset_multi and vfio_eoi operations for
72 * vfio_ccw device now.
73 */
74 struct VFIODeviceOps vfio_ccw_ops = {
75 .vfio_compute_needs_reset = vfio_ccw_compute_needs_reset,
76 };
77
/*
 * Forward a start-subchannel request to the host: copy the guest ORB and
 * SCSW into the vfio I/O region, write the region to the device fd, and
 * translate the outcome into an I/O-instruction condition code.
 */
static IOInstEnding vfio_ccw_handle_request(SubchDev *sch)
{
    VFIOCCWDevice *vcdev = VFIO_CCW(sch->driver_data);
    struct ccw_io_region *region = vcdev->io_region;
    int ret;

    if (!(sch->orb.ctrl0 & ORB_CTRL0_MASK_PFCH) && vcdev->force_orb_pfch) {
        /* User requested forcing the prefetch bit even if the guest did not. */
        sch->orb.ctrl0 |= ORB_CTRL0_MASK_PFCH;
        warn_once_pfch(vcdev, sch, "PFCH flag forced");
    }

    /* The region's areas must exactly mirror the architected structures. */
    QEMU_BUILD_BUG_ON(sizeof(region->orb_area) != sizeof(ORB));
    QEMU_BUILD_BUG_ON(sizeof(region->scsw_area) != sizeof(SCSW));
    QEMU_BUILD_BUG_ON(sizeof(region->irb_area) != sizeof(IRB));

    memset(region, 0, sizeof(*region));

    memcpy(region->orb_area, &sch->orb, sizeof(ORB));
    memcpy(region->scsw_area, &sch->curr_status.scsw, sizeof(SCSW));

again:
    ret = pwrite(vcdev->vdev.fd, region,
                 vcdev->io_region_size, vcdev->io_region_offset);
    if (ret != vcdev->io_region_size) {
        if (errno == EAGAIN) {
            /* Transient; retry the whole region write. */
            goto again;
        }
        error_report("vfio-ccw: write I/O region failed with errno=%d", errno);
        ret = errno ? -errno : -EFAULT;
    } else {
        ret = 0;
    }
    switch (ret) {
    case 0:
        return IOINST_CC_EXPECTED;
    case -EBUSY:
        return IOINST_CC_BUSY;
    case -ENODEV:
    case -EACCES:
        return IOINST_CC_NOT_OPERATIONAL;
    case -EFAULT:
    default:
        /* Anything else is surfaced to the guest as a unit exception. */
        sch_gen_unit_exception(sch);
        css_inject_io_interrupt(sch);
        return IOINST_CC_EXPECTED;
    }
}
125
/*
 * Refresh path-related SCHIB bits from the host for a store-subchannel
 * instruction.  If the optional schib region is absent, the emulated
 * SCHIB is used unchanged.
 */
static IOInstEnding vfio_ccw_handle_store(SubchDev *sch)
{
    VFIOCCWDevice *vcdev = VFIO_CCW(sch->driver_data);
    SCHIB *schib = &sch->curr_status;
    struct ccw_schib_region *region = vcdev->schib_region;
    SCHIB *s;
    int ret;

    /* schib region not available so nothing else to do */
    if (!region) {
        return IOINST_CC_EXPECTED;
    }

    memset(region, 0, sizeof(*region));
    ret = pread(vcdev->vdev.fd, region, vcdev->schib_region_size,
                vcdev->schib_region_offset);

    if (ret == -1) {
        /*
         * Device is probably damaged, but store subchannel does not
         * have a nonzero cc defined for this scenario. Log an error,
         * and presume things are otherwise fine.
         */
        error_report("vfio-ccw: store region read failed with errno=%d", errno);
        return IOINST_CC_EXPECTED;
    }

    /*
     * Selectively copy path-related bits of the SCHIB,
     * rather than copying the entire struct.
     */
    s = (SCHIB *)region->schib_area;
    schib->pmcw.pnom = s->pmcw.pnom;
    schib->pmcw.lpum = s->pmcw.lpum;
    schib->pmcw.pam = s->pmcw.pam;
    schib->pmcw.pom = s->pmcw.pom;

    /* Propagate a pending path-not-operational condition, if any. */
    if (s->scsw.flags & SCSW_FLAGS_MASK_PNO) {
        schib->scsw.flags |= SCSW_FLAGS_MASK_PNO;
    }

    return IOINST_CC_EXPECTED;
}
169
/*
 * Forward a clear-subchannel (CSCH) to the host via the async command
 * region.  Returns -ENOSYS when the region is absent so the caller can
 * fall back to emulation; otherwise 0 or a negative errno.
 */
static int vfio_ccw_handle_clear(SubchDev *sch)
{
    VFIOCCWDevice *vcdev = VFIO_CCW(sch->driver_data);
    struct ccw_cmd_region *region = vcdev->async_cmd_region;
    int ret;

    if (!vcdev->async_cmd_region) {
        /* Async command region not available, fall back to emulation */
        return -ENOSYS;
    }

    memset(region, 0, sizeof(*region));
    region->command = VFIO_CCW_ASYNC_CMD_CSCH;

again:
    ret = pwrite(vcdev->vdev.fd, region,
                 vcdev->async_cmd_region_size, vcdev->async_cmd_region_offset);
    if (ret != vcdev->async_cmd_region_size) {
        if (errno == EAGAIN) {
            /* Transient; retry the whole region write. */
            goto again;
        }
        error_report("vfio-ccw: write cmd region failed with errno=%d", errno);
        ret = errno ? -errno : -EFAULT;
    } else {
        ret = 0;
    }
    switch (ret) {
    case 0:
    case -ENODEV:
    case -EACCES:
        return ret;
    case -EFAULT:
    default:
        /* Anything else is surfaced to the guest as a unit exception. */
        sch_gen_unit_exception(sch);
        css_inject_io_interrupt(sch);
        return 0;
    }
}
208
/*
 * Forward a halt-subchannel (HSCH) to the host via the async command
 * region.  Returns -ENOSYS when the region is absent so the caller can
 * fall back to emulation; otherwise 0 or a negative errno (-EBUSY is
 * meaningful for halt, unlike clear).
 */
static int vfio_ccw_handle_halt(SubchDev *sch)
{
    VFIOCCWDevice *vcdev = VFIO_CCW(sch->driver_data);
    struct ccw_cmd_region *region = vcdev->async_cmd_region;
    int ret;

    if (!vcdev->async_cmd_region) {
        /* Async command region not available, fall back to emulation */
        return -ENOSYS;
    }

    memset(region, 0, sizeof(*region));
    region->command = VFIO_CCW_ASYNC_CMD_HSCH;

again:
    ret = pwrite(vcdev->vdev.fd, region,
                 vcdev->async_cmd_region_size, vcdev->async_cmd_region_offset);
    if (ret != vcdev->async_cmd_region_size) {
        if (errno == EAGAIN) {
            /* Transient; retry the whole region write. */
            goto again;
        }
        error_report("vfio-ccw: write cmd region failed with errno=%d", errno);
        ret = errno ? -errno : -EFAULT;
    } else {
        ret = 0;
    }
    switch (ret) {
    case 0:
    case -EBUSY:
    case -ENODEV:
    case -EACCES:
        return ret;
    case -EFAULT:
    default:
        /* Anything else is surfaced to the guest as a unit exception. */
        sch_gen_unit_exception(sch);
        css_inject_io_interrupt(sch);
        return 0;
    }
}
248
/* Device reset callback: ask the host driver to reset the subchannel. */
static void vfio_ccw_reset(DeviceState *dev)
{
    VFIOCCWDevice *vcdev = VFIO_CCW(dev);
    int fd = vcdev->vdev.fd;

    ioctl(fd, VFIO_DEVICE_RESET);
}
255
/*
 * Drain channel report words from the host CRW region and queue each one
 * for delivery to the guest.  Stops on a read error or when the host
 * returns an all-zero CRW (no more data).
 */
static void vfio_ccw_crw_read(VFIOCCWDevice *vcdev)
{
    struct ccw_crw_region *region = vcdev->crw_region;
    CRW crw;
    int size;

    /* Keep reading CRWs as long as data is returned */
    do {
        memset(region, 0, sizeof(*region));
        size = pread(vcdev->vdev.fd, region, vcdev->crw_region_size,
                     vcdev->crw_region_offset);

        if (size == -1) {
            error_report("vfio-ccw: Read crw region failed with errno=%d",
                         errno);
            break;
        }

        if (region->crw == 0) {
            /* No more CRWs to queue */
            break;
        }

        memcpy(&crw, &region->crw, sizeof(CRW));

        css_crw_add_to_queue(crw);
    } while (1);
}
284
vfio_ccw_req_notifier_handler(void * opaque)285 static void vfio_ccw_req_notifier_handler(void *opaque)
286 {
287 VFIOCCWDevice *vcdev = opaque;
288 Error *err = NULL;
289
290 if (!event_notifier_test_and_clear(&vcdev->req_notifier)) {
291 return;
292 }
293
294 qdev_unplug(DEVICE(vcdev), &err);
295 if (err) {
296 warn_reportf_err(err, VFIO_MSG_PREFIX, vcdev->vdev.name);
297 }
298 }
299
vfio_ccw_crw_notifier_handler(void * opaque)300 static void vfio_ccw_crw_notifier_handler(void *opaque)
301 {
302 VFIOCCWDevice *vcdev = opaque;
303
304 while (event_notifier_test_and_clear(&vcdev->crw_notifier)) {
305 vfio_ccw_crw_read(vcdev);
306 }
307 }
308
/*
 * I/O eventfd fired: read the IRB back from the host I/O region, fold it
 * into the subchannel's status, and inject an I/O interrupt.  Read
 * failures are mapped to architected error conditions in the SCSW.
 */
static void vfio_ccw_io_notifier_handler(void *opaque)
{
    VFIOCCWDevice *vcdev = opaque;
    struct ccw_io_region *region = vcdev->io_region;
    CcwDevice *ccw_dev = CCW_DEVICE(vcdev);
    SubchDev *sch = ccw_dev->sch;
    SCHIB *schib = &sch->curr_status;
    SCSW s;
    IRB irb;
    ESW esw;
    int size;

    if (!event_notifier_test_and_clear(&vcdev->io_notifier)) {
        return;
    }

    size = pread(vcdev->vdev.fd, region, vcdev->io_region_size,
                 vcdev->io_region_offset);
    if (size == -1) {
        switch (errno) {
        case ENODEV:
            /* Generate a deferred cc 3 condition. */
            schib->scsw.flags |= SCSW_FLAGS_MASK_CC;
            schib->scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL;
            schib->scsw.ctrl |= (SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND);
            goto read_err;
        case EFAULT:
            /* Memory problem, generate channel data check. */
            schib->scsw.ctrl &= ~SCSW_ACTL_START_PEND;
            schib->scsw.cstat = SCSW_CSTAT_DATA_CHECK;
            schib->scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL;
            schib->scsw.ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY |
                       SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
            goto read_err;
        default:
            /* Error, generate channel program check. */
            schib->scsw.ctrl &= ~SCSW_ACTL_START_PEND;
            schib->scsw.cstat = SCSW_CSTAT_PROG_CHECK;
            schib->scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL;
            schib->scsw.ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY |
                       SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
            goto read_err;
        }
    } else if (size != vcdev->io_region_size) {
        /* Information transfer error, generate channel-control check. */
        schib->scsw.ctrl &= ~SCSW_ACTL_START_PEND;
        schib->scsw.cstat = SCSW_CSTAT_CHN_CTRL_CHK;
        schib->scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL;
        schib->scsw.ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY |
                   SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
        goto read_err;
    }

    memcpy(&irb, region->irb_area, sizeof(IRB));

    /* Update control block via irb. */
    s = schib->scsw;
    copy_scsw_to_guest(&s, &irb.scsw);
    schib->scsw = s;

    copy_esw_to_guest(&esw, &irb.esw);
    sch->esw = esw;

    /* If a unit check is pending, copy sense data. */
    if ((schib->scsw.dstat & SCSW_DSTAT_UNIT_CHECK) &&
        (schib->pmcw.chars & PMCW_CHARS_MASK_CSENSE)) {
        memcpy(sch->sense_data, irb.ecw, sizeof(irb.ecw));
    }

read_err:
    css_inject_io_interrupt(sch);
}
381
vfio_ccw_register_irq_notifier(VFIOCCWDevice * vcdev,unsigned int irq,Error ** errp)382 static bool vfio_ccw_register_irq_notifier(VFIOCCWDevice *vcdev,
383 unsigned int irq,
384 Error **errp)
385 {
386 VFIODevice *vdev = &vcdev->vdev;
387 g_autofree struct vfio_irq_info *irq_info = NULL;
388 size_t argsz;
389 int fd;
390 EventNotifier *notifier;
391 IOHandler *fd_read;
392
393 switch (irq) {
394 case VFIO_CCW_IO_IRQ_INDEX:
395 notifier = &vcdev->io_notifier;
396 fd_read = vfio_ccw_io_notifier_handler;
397 break;
398 case VFIO_CCW_CRW_IRQ_INDEX:
399 notifier = &vcdev->crw_notifier;
400 fd_read = vfio_ccw_crw_notifier_handler;
401 break;
402 case VFIO_CCW_REQ_IRQ_INDEX:
403 notifier = &vcdev->req_notifier;
404 fd_read = vfio_ccw_req_notifier_handler;
405 break;
406 default:
407 error_setg(errp, "vfio: Unsupported device irq(%d)", irq);
408 return false;
409 }
410
411 if (vdev->num_irqs < irq + 1) {
412 error_setg(errp, "vfio: IRQ %u not available (number of irqs %u)",
413 irq, vdev->num_irqs);
414 return false;
415 }
416
417 argsz = sizeof(*irq_info);
418 irq_info = g_malloc0(argsz);
419 irq_info->index = irq;
420 irq_info->argsz = argsz;
421 if (ioctl(vdev->fd, VFIO_DEVICE_GET_IRQ_INFO,
422 irq_info) < 0 || irq_info->count < 1) {
423 error_setg_errno(errp, errno, "vfio: Error getting irq info");
424 return false;
425 }
426
427 if (event_notifier_init(notifier, 0)) {
428 error_setg_errno(errp, errno,
429 "vfio: Unable to init event notifier for irq (%d)",
430 irq);
431 return false;
432 }
433
434 fd = event_notifier_get_fd(notifier);
435 qemu_set_fd_handler(fd, fd_read, NULL, vcdev);
436
437 if (!vfio_set_irq_signaling(vdev, irq, 0,
438 VFIO_IRQ_SET_ACTION_TRIGGER, fd, errp)) {
439 qemu_set_fd_handler(fd, NULL, NULL, vcdev);
440 event_notifier_cleanup(notifier);
441 }
442
443 return true;
444 }
445
/*
 * Disarm the kernel-side trigger for an IRQ index, remove the qemu fd
 * handler, and destroy the notifier.  A signaling failure is only worth
 * a warning during teardown.
 */
static void vfio_ccw_unregister_irq_notifier(VFIOCCWDevice *vcdev,
                                             unsigned int irq)
{
    Error *err = NULL;
    EventNotifier *notifier;

    switch (irq) {
    case VFIO_CCW_IO_IRQ_INDEX:
        notifier = &vcdev->io_notifier;
        break;
    case VFIO_CCW_CRW_IRQ_INDEX:
        notifier = &vcdev->crw_notifier;
        break;
    case VFIO_CCW_REQ_IRQ_INDEX:
        notifier = &vcdev->req_notifier;
        break;
    default:
        error_report("vfio: Unsupported device irq(%d)", irq);
        return;
    }

    if (!vfio_set_irq_signaling(&vcdev->vdev, irq, 0,
                                VFIO_IRQ_SET_ACTION_TRIGGER, -1, &err)) {
        warn_reportf_err(err, VFIO_MSG_PREFIX, vcdev->vdev.name);
    }

    qemu_set_fd_handler(event_notifier_get_fd(notifier),
                        NULL, NULL, vcdev);
    event_notifier_cleanup(notifier);
}
476
/*
 * Discover and allocate the vfio-ccw regions.  The I/O region is
 * mandatory; the async command, SCHIB and CRW regions are optional
 * capabilities (their pointers stay NULL when the host lacks them).
 * Returns true on success; on failure errp is set and all buffers
 * allocated so far are freed.
 */
static bool vfio_ccw_get_region(VFIOCCWDevice *vcdev, Error **errp)
{
    VFIODevice *vdev = &vcdev->vdev;
    struct vfio_region_info *info;
    int ret;

    /* Sanity check device */
    if (!(vdev->flags & VFIO_DEVICE_FLAGS_CCW)) {
        error_setg(errp, "vfio: Um, this isn't a vfio-ccw device");
        return false;
    }

    /*
     * We always expect at least the I/O region to be present. We also
     * may have a variable number of regions governed by capabilities.
     */
    if (vdev->num_regions < VFIO_CCW_CONFIG_REGION_INDEX + 1) {
        error_setg(errp, "vfio: too few regions (%u), expected at least %u",
                   vdev->num_regions, VFIO_CCW_CONFIG_REGION_INDEX + 1);
        return false;
    }

    ret = vfio_get_region_info(vdev, VFIO_CCW_CONFIG_REGION_INDEX, &info);
    if (ret) {
        error_setg_errno(errp, -ret, "vfio: Error getting config info");
        return false;
    }

    vcdev->io_region_size = info->size;
    if (sizeof(*vcdev->io_region) != vcdev->io_region_size) {
        error_setg(errp, "vfio: Unexpected size of the I/O region");
        goto out_err;
    }

    vcdev->io_region_offset = info->offset;
    vcdev->io_region = g_malloc0(info->size);
    /* info is freed after each successful lookup and reused for the next. */
    g_free(info);

    /* check for the optional async command region */
    ret = vfio_get_dev_region_info(vdev, VFIO_REGION_TYPE_CCW,
                                   VFIO_REGION_SUBTYPE_CCW_ASYNC_CMD, &info);
    if (!ret) {
        vcdev->async_cmd_region_size = info->size;
        if (sizeof(*vcdev->async_cmd_region) != vcdev->async_cmd_region_size) {
            error_setg(errp, "vfio: Unexpected size of the async cmd region");
            goto out_err;
        }
        vcdev->async_cmd_region_offset = info->offset;
        vcdev->async_cmd_region = g_malloc0(info->size);
        g_free(info);
    }

    /* check for the optional schib region */
    ret = vfio_get_dev_region_info(vdev, VFIO_REGION_TYPE_CCW,
                                   VFIO_REGION_SUBTYPE_CCW_SCHIB, &info);
    if (!ret) {
        vcdev->schib_region_size = info->size;
        if (sizeof(*vcdev->schib_region) != vcdev->schib_region_size) {
            error_setg(errp, "vfio: Unexpected size of the schib region");
            goto out_err;
        }
        vcdev->schib_region_offset = info->offset;
        vcdev->schib_region = g_malloc(info->size);
        g_free(info);
    }

    /* check for the optional CRW region */
    ret = vfio_get_dev_region_info(vdev, VFIO_REGION_TYPE_CCW,
                                   VFIO_REGION_SUBTYPE_CCW_CRW, &info);

    if (!ret) {
        vcdev->crw_region_size = info->size;
        if (sizeof(*vcdev->crw_region) != vcdev->crw_region_size) {
            error_setg(errp, "vfio: Unexpected size of the CRW region");
            goto out_err;
        }
        vcdev->crw_region_offset = info->offset;
        vcdev->crw_region = g_malloc(info->size);
        g_free(info);
    }

    return true;

out_err:
    /* Unallocated region pointers are NULL, so these frees are safe. */
    g_free(vcdev->crw_region);
    g_free(vcdev->schib_region);
    g_free(vcdev->async_cmd_region);
    g_free(vcdev->io_region);
    g_free(info);
    return false;
}
566
/*
 * Release all region buffers allocated by vfio_ccw_get_region().
 * Clear the pointers as well: several code paths use a non-NULL region
 * pointer as "region available" (e.g. vfio_ccw_handle_clear()), so
 * leaving them dangling would invite use-after-free.
 */
static void vfio_ccw_put_region(VFIOCCWDevice *vcdev)
{
    g_free(vcdev->crw_region);
    vcdev->crw_region = NULL;
    g_free(vcdev->schib_region);
    vcdev->schib_region = NULL;
    g_free(vcdev->async_cmd_region);
    vcdev->async_cmd_region = NULL;
    g_free(vcdev->io_region);
    vcdev->io_region = NULL;
}
574
/*
 * Realize: set up the subchannel, attach the vfio device, map regions,
 * and register IRQ notifiers.  The goto chain unwinds each stage in
 * reverse order on failure.
 */
static void vfio_ccw_realize(DeviceState *dev, Error **errp)
{
    S390CCWDevice *cdev = S390_CCW_DEVICE(dev);
    VFIOCCWDevice *vcdev = VFIO_CCW(cdev);
    S390CCWDeviceClass *cdc = S390_CCW_DEVICE_GET_CLASS(cdev);
    VFIODevice *vbasedev = &vcdev->vdev;
    Error *err = NULL;

    /* Call the class init function for subchannel. */
    if (cdc->realize) {
        if (!cdc->realize(cdev, vcdev->vdev.sysfsdev, errp)) {
            return;
        }
    }

    if (!vfio_device_get_name(vbasedev, errp)) {
        goto out_unrealize;
    }

    if (!vfio_attach_device(cdev->mdevid, vbasedev,
                            &address_space_memory, errp)) {
        goto out_attach_dev_err;
    }

    if (!vfio_ccw_get_region(vcdev, errp)) {
        goto out_region_err;
    }

    if (!vfio_ccw_register_irq_notifier(vcdev, VFIO_CCW_IO_IRQ_INDEX, errp)) {
        goto out_io_notifier_err;
    }

    /* CRW notifications only make sense if the CRW region exists. */
    if (vcdev->crw_region) {
        if (!vfio_ccw_register_irq_notifier(vcdev, VFIO_CCW_CRW_IRQ_INDEX,
                                            errp)) {
            goto out_irq_notifier_err;
        }
    }

    if (!vfio_ccw_register_irq_notifier(vcdev, VFIO_CCW_REQ_IRQ_INDEX, &err)) {
        /*
         * Report this error, but do not make it a failing condition.
         * Lack of this IRQ in the host does not prevent normal operation.
         */
        warn_report_err(err);
    }

    return;

out_irq_notifier_err:
    vfio_ccw_unregister_irq_notifier(vcdev, VFIO_CCW_REQ_IRQ_INDEX);
    vfio_ccw_unregister_irq_notifier(vcdev, VFIO_CCW_CRW_IRQ_INDEX);
    vfio_ccw_unregister_irq_notifier(vcdev, VFIO_CCW_IO_IRQ_INDEX);
out_io_notifier_err:
    vfio_ccw_put_region(vcdev);
out_region_err:
    vfio_detach_device(vbasedev);
out_attach_dev_err:
    g_free(vbasedev->name);
out_unrealize:
    if (cdc->unrealize) {
        cdc->unrealize(cdev);
    }
}
639
/* Unrealize: tear everything down in reverse order of realize. */
static void vfio_ccw_unrealize(DeviceState *dev)
{
    S390CCWDevice *cdev = S390_CCW_DEVICE(dev);
    VFIOCCWDevice *vcdev = VFIO_CCW(cdev);
    S390CCWDeviceClass *cdc = S390_CCW_DEVICE_GET_CLASS(cdev);

    vfio_ccw_unregister_irq_notifier(vcdev, VFIO_CCW_REQ_IRQ_INDEX);
    vfio_ccw_unregister_irq_notifier(vcdev, VFIO_CCW_CRW_IRQ_INDEX);
    vfio_ccw_unregister_irq_notifier(vcdev, VFIO_CCW_IO_IRQ_INDEX);
    vfio_ccw_put_region(vcdev);
    vfio_detach_device(&vcdev->vdev);
    g_free(vcdev->vdev.name);

    if (cdc->unrealize) {
        cdc->unrealize(cdev);
    }
}
657
static Property vfio_ccw_properties[] = {
    /* sysfs path of the mediated device to attach */
    DEFINE_PROP_STRING("sysfsdev", VFIOCCWDevice, vdev.sysfsdev),
    /* force the ORB prefetch bit on SSCH requests (default off) */
    DEFINE_PROP_BOOL("force-orb-pfch", VFIOCCWDevice, force_orb_pfch, false),
#ifdef CONFIG_IOMMUFD
    /* optional link to an iommufd backend object */
    DEFINE_PROP_LINK("iommufd", VFIOCCWDevice, vdev.iommufd,
                     TYPE_IOMMUFD_BACKEND, IOMMUFDBackend *),
#endif
    DEFINE_PROP_END_OF_LIST(),
};
667
/* vfio-ccw devices are not migratable. */
static const VMStateDescription vfio_ccw_vmstate = {
    .name = "vfio-ccw",
    .unmigratable = 1,
};
672
/* Per-instance init: set up the embedded VFIODevice before realize. */
static void vfio_ccw_instance_init(Object *obj)
{
    VFIOCCWDevice *vcdev = VFIO_CCW(obj);
    VFIODevice *vbasedev = &vcdev->vdev;

    /* CCW device is mdev type device */
    vbasedev->mdev = true;

    /*
     * All vfio-ccw devices are believed to operate in a way compatible with
     * discarding of memory in RAM blocks, ie. pages pinned in the host are
     * in the current working set of the guest driver and therefore never
     * overlap e.g., with pages available to the guest balloon driver. This
     * needs to be set before vfio_get_device() for vfio common to handle
     * ram_block_discard_disable().
     */
    vfio_device_init(vbasedev, VFIO_DEVICE_TYPE_CCW, &vfio_ccw_ops,
                     DEVICE(vcdev), true);
}
692
#ifdef CONFIG_IOMMUFD
/* "fd" property setter: hand a pre-opened vfio device fd to the core. */
static void vfio_ccw_set_fd(Object *obj, const char *str, Error **errp)
{
    vfio_device_set_fd(&VFIO_CCW(obj)->vdev, str, errp);
}
#endif
699
/* Class init: wire device callbacks and the subchannel instruction hooks. */
static void vfio_ccw_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    S390CCWDeviceClass *cdc = S390_CCW_DEVICE_CLASS(klass);

    device_class_set_props(dc, vfio_ccw_properties);
#ifdef CONFIG_IOMMUFD
    object_class_property_add_str(klass, "fd", NULL, vfio_ccw_set_fd);
#endif
    dc->vmsd = &vfio_ccw_vmstate;
    dc->desc = "VFIO-based subchannel assignment";
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    dc->realize = vfio_ccw_realize;
    dc->unrealize = vfio_ccw_unrealize;
    device_class_set_legacy_reset(dc, vfio_ccw_reset);

    /* Channel-instruction handlers invoked by the css emulation. */
    cdc->handle_request = vfio_ccw_handle_request;
    cdc->handle_halt = vfio_ccw_handle_halt;
    cdc->handle_clear = vfio_ccw_handle_clear;
    cdc->handle_store = vfio_ccw_handle_store;
}
721
/* QOM type registration data for the vfio-ccw device. */
static const TypeInfo vfio_ccw_info = {
    .name = TYPE_VFIO_CCW,
    .parent = TYPE_S390_CCW,
    .instance_size = sizeof(VFIOCCWDevice),
    .instance_init = vfio_ccw_instance_init,
    .class_init = vfio_ccw_class_init,
};
729
/* Module init hook: register the vfio-ccw QOM type. */
static void register_vfio_ccw_type(void)
{
    type_register_static(&vfio_ccw_info);
}
734
735 type_init(register_vfio_ccw_type)
736